hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a97400b9f8653bf8e2e4366b42d6746d2d0250eb
| 38,782
|
py
|
Python
|
lightly/openapi_generated/swagger_client/api/predictions_api.py
|
CodeGuy-007/lightly
|
64143fe8a477c04288009c65fa1265cef8aa48f8
|
[
"MIT"
] | null | null | null |
lightly/openapi_generated/swagger_client/api/predictions_api.py
|
CodeGuy-007/lightly
|
64143fe8a477c04288009c65fa1265cef8aa48f8
|
[
"MIT"
] | null | null | null |
lightly/openapi_generated/swagger_client/api/predictions_api.py
|
CodeGuy-007/lightly
|
64143fe8a477c04288009c65fa1265cef8aa48f8
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lightly.openapi_generated.swagger_client.api_client import ApiClient
class PredictionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_or_update_prediction_by_sample_id(self, body, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs): # noqa: E501
"""create_or_update_prediction_by_sample_id # noqa: E501
Create/Update all the prediction singletons for a sampleId in the order/index of them being discovered # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_update_prediction_by_sample_id(body, dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[PredictionSingleton] body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID sample_id: ObjectId of the sample (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_or_update_prediction_by_sample_id_with_http_info(body, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
else:
(data) = self.create_or_update_prediction_by_sample_id_with_http_info(body, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
return data
def create_or_update_prediction_by_sample_id_with_http_info(self, body, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs):  # noqa: E501
    """create_or_update_prediction_by_sample_id  # noqa: E501

    Create/Update all the prediction singletons for a sampleId in the order/index of them being discovered  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_or_update_prediction_by_sample_id_with_http_info(body, dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param list[PredictionSingleton] body: (required)
    :param MongoObjectID dataset_id: ObjectId of the dataset (required)
    :param MongoObjectID sample_id: ObjectId of the sample (required)
    :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
    :return: CreateEntityResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts; anything else raises below.
    all_params = ['body', 'dataset_id', 'sample_id', 'prediction_uuid_timestamp']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so named args and **kwargs can be
    # validated and looked up uniformly by name (generated-code idiom).
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_or_update_prediction_by_sample_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in params or
                                                   params['body'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `body` when calling `create_or_update_prediction_by_sample_id`")  # noqa: E501
    # verify the required parameter 'dataset_id' is set
    if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                   params['dataset_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `dataset_id` when calling `create_or_update_prediction_by_sample_id`")  # noqa: E501
    # verify the required parameter 'sample_id' is set
    if self.api_client.client_side_validation and ('sample_id' not in params or
                                                   params['sample_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `sample_id` when calling `create_or_update_prediction_by_sample_id`")  # noqa: E501
    # verify the required parameter 'prediction_uuid_timestamp' is set
    if self.api_client.client_side_validation and ('prediction_uuid_timestamp' not in params or
                                                   params['prediction_uuid_timestamp'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `prediction_uuid_timestamp` when calling `create_or_update_prediction_by_sample_id`")  # noqa: E501

    # No multi-valued parameters here, so no collection formats are needed.
    collection_formats = {}

    # Values substituted into the {datasetId}/{sampleId} URL template below.
    path_params = {}
    if 'dataset_id' in params:
        path_params['datasetId'] = params['dataset_id']  # noqa: E501
    if 'sample_id' in params:
        path_params['sampleId'] = params['sample_id']  # noqa: E501

    query_params = []
    if 'prediction_uuid_timestamp' in params:
        query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the list of prediction singletons itself.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CreateEntityResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_or_update_prediction_task_schema_by_dataset_id(self, body, dataset_id, prediction_uuid_timestamp, **kwargs): # noqa: E501
"""create_or_update_prediction_task_schema_by_dataset_id # noqa: E501
Creates/updates a prediction task schema with the task name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_update_prediction_task_schema_by_dataset_id(body, dataset_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PredictionTaskSchema body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(body, dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
else:
(data) = self.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(body, dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
return data
def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, body, dataset_id, prediction_uuid_timestamp, **kwargs):  # noqa: E501
    """create_or_update_prediction_task_schema_by_dataset_id  # noqa: E501

    Creates/updates a prediction task schema with the task name  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(body, dataset_id, prediction_uuid_timestamp, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param PredictionTaskSchema body: (required)
    :param MongoObjectID dataset_id: ObjectId of the dataset (required)
    :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
    :return: CreateEntityResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts; anything else raises below.
    all_params = ['body', 'dataset_id', 'prediction_uuid_timestamp']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so named args and **kwargs can be
    # validated and looked up uniformly by name (generated-code idiom).
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_or_update_prediction_task_schema_by_dataset_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in params or
                                                   params['body'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `body` when calling `create_or_update_prediction_task_schema_by_dataset_id`")  # noqa: E501
    # verify the required parameter 'dataset_id' is set
    if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                   params['dataset_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `dataset_id` when calling `create_or_update_prediction_task_schema_by_dataset_id`")  # noqa: E501
    # verify the required parameter 'prediction_uuid_timestamp' is set
    if self.api_client.client_side_validation and ('prediction_uuid_timestamp' not in params or
                                                   params['prediction_uuid_timestamp'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `prediction_uuid_timestamp` when calling `create_or_update_prediction_task_schema_by_dataset_id`")  # noqa: E501

    # No multi-valued parameters here, so no collection formats are needed.
    collection_formats = {}

    # Value substituted into the {datasetId} URL template below.
    path_params = {}
    if 'dataset_id' in params:
        path_params['datasetId'] = params['dataset_id']  # noqa: E501

    query_params = []
    if 'prediction_uuid_timestamp' in params:
        query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the prediction task schema itself.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/predictions/tasks', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CreateEntityResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_prediction_by_sample_id(self, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs): # noqa: E501
"""get_prediction_by_sample_id # noqa: E501
Get all prediction singletons of a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID sample_id: ObjectId of the sample (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:return: PredictionSingletons
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
else:
(data) = self.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
return data
def get_prediction_by_sample_id_with_http_info(self, dataset_id, sample_id, prediction_uuid_timestamp, **kwargs):  # noqa: E501
    """get_prediction_by_sample_id  # noqa: E501

    Get all prediction singletons of a specific sample of a dataset  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param MongoObjectID dataset_id: ObjectId of the dataset (required)
    :param MongoObjectID sample_id: ObjectId of the sample (required)
    :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
    :return: PredictionSingletons
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts; anything else raises below.
    all_params = ['dataset_id', 'sample_id', 'prediction_uuid_timestamp']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so named args and **kwargs can be
    # validated and looked up uniformly by name (generated-code idiom).
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_prediction_by_sample_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'dataset_id' is set
    if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                   params['dataset_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `dataset_id` when calling `get_prediction_by_sample_id`")  # noqa: E501
    # verify the required parameter 'sample_id' is set
    if self.api_client.client_side_validation and ('sample_id' not in params or
                                                   params['sample_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `sample_id` when calling `get_prediction_by_sample_id`")  # noqa: E501
    # verify the required parameter 'prediction_uuid_timestamp' is set
    if self.api_client.client_side_validation and ('prediction_uuid_timestamp' not in params or
                                                   params['prediction_uuid_timestamp'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `prediction_uuid_timestamp` when calling `get_prediction_by_sample_id`")  # noqa: E501

    # No multi-valued parameters here, so no collection formats are needed.
    collection_formats = {}

    # Values substituted into the {datasetId}/{sampleId} URL template below.
    path_params = {}
    if 'dataset_id' in params:
        path_params['datasetId'] = params['dataset_id']  # noqa: E501
    if 'sample_id' in params:
        path_params['sampleId'] = params['sample_id']  # noqa: E501

    query_params = []
    if 'prediction_uuid_timestamp' in params:
        query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PredictionSingletons',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_prediction_task_schema_by_task_name(self, dataset_id, prediction_uuid_timestamp, task_name, **kwargs): # noqa: E501
"""get_prediction_task_schema_by_task_name # noqa: E501
Get a prediction task schemas named taskName for a datasetId # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:param TaskName task_name: The prediction task name for which one wants to list the predictions (required)
:return: PredictionTaskSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501
else:
(data) = self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501
return data
def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id, prediction_uuid_timestamp, task_name, **kwargs):  # noqa: E501
    """get_prediction_task_schema_by_task_name  # noqa: E501

    Get a prediction task schemas named taskName for a datasetId  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param MongoObjectID dataset_id: ObjectId of the dataset (required)
    :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
    :param TaskName task_name: The prediction task name for which one wants to list the predictions (required)
    :return: PredictionTaskSchema
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts; anything else raises below.
    all_params = ['dataset_id', 'prediction_uuid_timestamp', 'task_name']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so named args and **kwargs can be
    # validated and looked up uniformly by name (generated-code idiom).
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_prediction_task_schema_by_task_name" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'dataset_id' is set
    if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                   params['dataset_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `dataset_id` when calling `get_prediction_task_schema_by_task_name`")  # noqa: E501
    # verify the required parameter 'prediction_uuid_timestamp' is set
    if self.api_client.client_side_validation and ('prediction_uuid_timestamp' not in params or
                                                   params['prediction_uuid_timestamp'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `prediction_uuid_timestamp` when calling `get_prediction_task_schema_by_task_name`")  # noqa: E501
    # verify the required parameter 'task_name' is set
    if self.api_client.client_side_validation and ('task_name' not in params or
                                                   params['task_name'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `task_name` when calling `get_prediction_task_schema_by_task_name`")  # noqa: E501

    # No multi-valued parameters here, so no collection formats are needed.
    collection_formats = {}

    # Values substituted into the {datasetId}/{taskName} URL template below.
    path_params = {}
    if 'dataset_id' in params:
        path_params['datasetId'] = params['dataset_id']  # noqa: E501
    if 'task_name' in params:
        path_params['taskName'] = params['task_name']  # noqa: E501

    query_params = []
    if 'prediction_uuid_timestamp' in params:
        query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/predictions/tasks/{taskName}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PredictionTaskSchema',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_prediction_task_schemas_by_dataset_id(self, dataset_id, **kwargs): # noqa: E501
"""get_prediction_task_schemas_by_dataset_id # noqa: E501
Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_task_schemas_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
:return: list[PredictionTaskSchema]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
else:
(data) = self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
return data
def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id, **kwargs):  # noqa: E501
    """get_prediction_task_schemas_by_dataset_id  # noqa: E501

    Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param MongoObjectID dataset_id: ObjectId of the dataset (required)
    :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
    :return: list[PredictionTaskSchema]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts; anything else raises
    # below. Note prediction_uuid_timestamp is optional for this endpoint
    # and arrives via **kwargs.
    all_params = ['dataset_id', 'prediction_uuid_timestamp']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so named args and **kwargs can be
    # validated and looked up uniformly by name (generated-code idiom).
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_prediction_task_schemas_by_dataset_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'dataset_id' is set
    if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                   params['dataset_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `dataset_id` when calling `get_prediction_task_schemas_by_dataset_id`")  # noqa: E501

    # No multi-valued parameters here, so no collection formats are needed.
    collection_formats = {}

    # Value substituted into the {datasetId} URL template below.
    path_params = {}
    if 'dataset_id' in params:
        path_params['datasetId'] = params['dataset_id']  # noqa: E501

    query_params = []
    if 'prediction_uuid_timestamp' in params:
        query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/predictions/tasks', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[PredictionTaskSchema]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_predictions_by_dataset_id(self, dataset_id, prediction_uuid_timestamp, **kwargs): # noqa: E501
"""get_predictions_by_dataset_id # noqa: E501
Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:param TaskName task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
:return: list[PredictionSingletons]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
else:
(data) = self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
return data
    def get_predictions_by_dataset_id_with_http_info(self, dataset_id, prediction_uuid_timestamp, **kwargs):  # noqa: E501
        """get_predictions_by_dataset_id  # noqa: E501

        Get all prediction singletons of all samples of a dataset ordered by the sample mapping  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param MongoObjectID dataset_id: ObjectId of the dataset (required)
        :param Timestamp prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :param TaskName task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
        :return: list[PredictionSingletons]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted as keyword arguments: the declared API parameters
        # plus the generic request-control kwargs understood by call_api.
        all_params = ['dataset_id', 'prediction_uuid_timestamp', 'task_name']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the bound arguments (self, dataset_id,
        # prediction_uuid_timestamp, kwargs) at this point; kwargs entries are
        # then validated against all_params and merged into the snapshot.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_predictions_by_dataset_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'dataset_id' is set
        if self.api_client.client_side_validation and ('dataset_id' not in params or
                                                       params['dataset_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `dataset_id` when calling `get_predictions_by_dataset_id`")  # noqa: E501
        # verify the required parameter 'prediction_uuid_timestamp' is set
        if self.api_client.client_side_validation and ('prediction_uuid_timestamp' not in params or
                                                       params['prediction_uuid_timestamp'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `prediction_uuid_timestamp` when calling `get_predictions_by_dataset_id`")  # noqa: E501

        collection_formats = {}

        # dataset_id is interpolated into the URL path; the timestamp and the
        # optional task name travel as query parameters.
        path_params = {}
        if 'dataset_id' in params:
            path_params['datasetId'] = params['dataset_id']  # noqa: E501

        query_params = []
        if 'prediction_uuid_timestamp' in params:
            query_params.append(('predictionUUIDTimestamp', params['prediction_uuid_timestamp']))  # noqa: E501
        if 'task_name' in params:
            query_params.append(('taskName', params['task_name']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/samples', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[PredictionSingletons]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 55.323823
| 337
| 0.667681
| 4,786
| 38,782
| 5.139783
| 0.048475
| 0.040652
| 0.0748
| 0.036587
| 0.974186
| 0.971625
| 0.968535
| 0.963129
| 0.958494
| 0.954266
| 0
| 0.015032
| 0.258986
| 38,782
| 700
| 338
| 55.402857
| 0.840942
| 0.397607
| 0
| 0.791557
| 0
| 0
| 0.244512
| 0.118414
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034301
| false
| 0
| 0.010554
| 0
| 0.094987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8d1b6b2a336d0d6d21b1cb23ba26ac2f090134e2
| 15,512
|
py
|
Python
|
utils/attr_computing.py
|
GlowingHorse/Class-Discriminative-Vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | 2
|
2020-06-25T15:35:19.000Z
|
2020-07-08T11:14:46.000Z
|
utils/attr_computing.py
|
GlowingHorse/class-discriminative-vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | 1
|
2020-06-25T15:59:54.000Z
|
2020-10-23T06:09:39.000Z
|
utils/attr_computing.py
|
GlowingHorse/Class-Discriminative-Vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | null | null | null |
"""
The attr_computing.py
provides two methods for computing channel Shapley
and transform them to feature attributions.
"""
import numpy as np
import tensorflow as tf
import lucid.optvis.render as render
from lucid.misc.gradient_override import gradient_override_map
def compute_shap(img, model, attr_class, layers,
                 flag1, flag_read_attr=True,
                 iter_num=2 ** 8, labels=None, save_directory=None):
    """
    Using sampling based Shapley method to compute feature attributions.

    For every layer in `layers`, channel-level Shapley values are estimated by
    sampling `iter_num` random channel permutations, zeroing the channels one
    by one and crediting each channel with the resulting drop of the
    `attr_class` logit.

    :param img: input image array (H, W, 3) fed through the network
    :param model: lucid modelzoo model; assumed to expose a
        "softmax2_pre_activation" tensor (GoogLeNet-style graph) --
        NOTE(review): confirm for other architectures
    :param attr_class: label name whose logit is attributed
    :param layers: list of layer names to attribute
    :param flag1: tag embedded in the on-disk attribution file names
    :param flag_read_attr: when True, load previously saved attributions from
        `save_directory` instead of recomputing them
    :param iter_num: number of sampled permutations (default 256)
    :param labels: list of label names; `attr_class` must be a member
    :param save_directory: directory holding the "<flag1>_<layer>_<class>.txt"
        attribution files
    :return: ([AM_list, AM_T_reverse], logit_list, channel_attr_list,
        [kept_channel_list, kept_channel_list_reverse]); AM_* hold the
        activation maps restricted to the kept channels, the kept-channel
        lists hold the selected channel indices per layer
    """
    with tf.Graph().as_default(), tf.Session() as sess, gradient_override_map({}):
        # img = tf.image.resize_image_with_crop_or_pad(img, model.image_shape[0], model.image_shape[0])
        # imgnp = sess.run(img)
        # imgnp = imgnp.reshape(224, 224, 3)
        # plt.imsave("./doghead224.jpeg", imgnp)
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        # grads_cam_T = [T(layer) for layer in layers]
        # logit = T("softmax2_pre_activation")[0]
        # score = T("output2")[0, labels.index(attr_class)]
        logit = T("softmax2_pre_activation")[0]
        # Per-layer result slots, filled inside the loop below.
        AM_T = list(range(len(layers)))
        AM_T_reverse = list(range(len(layers)))
        channel_attr_list = list(range(len(layers)))
        kept_channel_list = list(range(len(layers)))
        kept_channel_list_reverse = list(range(len(layers)))
        ori_logit = logit.eval()
        # One-hot vector selecting the attr_class entry of the logit vector.
        y_label = np.zeros_like(ori_logit)
        y_label[labels.index(attr_class)] = 1
        # index_class_logit = ori_logit[labels.index(attr_class)]
        # detected_label_index = ori_logit.argmax()
        # print("detected label index: {}, real label index: {}, label name: {}"
        #       .format(detected_label_index, labels.index(attr_class), attr_class))
        for i_wanted_layer in range(len(layers)):
            layer = layers[i_wanted_layer]
            acts = T(layer).eval()
            acts_shape = list(acts.shape)
            # part_name = "import/{}:0".format(layer)
            # t_part_input = tf.placeholder(acts.dtype, acts_shape)
            # T_part = import_part_model(model, t_part_input, part_name)
            # part_logit = T_part("softmax2_pre_activation")[0]
            n_features = acts.shape[-1]
            if not flag_read_attr:
                result = np.zeros((1, n_features))
                run_shape = acts_shape.copy()
                # run_shape = np.delete(run_shape, -1).tolist()
                # run_shape.insert(-1, -1)
                reconstruction_shape = [1, acts_shape[-1]]
                # Sampled Shapley loop: for each random ordering, remove
                # channels one at a time and accumulate the logit drop.
                for r in range(iter_num):
                    p = np.random.permutation(n_features)
                    x = acts.copy().reshape(run_shape)
                    y = None
                    for i in p:
                        if y is None:
                            # Baseline logit before the first removal.
                            y = logit.eval({T(layer): x})
                            # y = model.predict(x.reshape(acts_shape))
                        x[..., i] = 0
                        y0 = logit.eval({T(layer): x})
                        # print("Ori logit score: {}, new logit score: {}"
                        #       .format(index_class_logit, y0[labels.index(attr_class)]))
                        # y0 = model.predict(x.reshape(acts_shape))
                        assert y0.shape == y_label.shape, y0.shape
                        # Contribution of channel i under this ordering:
                        # change of the attr_class logit only.
                        prediction_delta = np.sum((y - y0) * y_label)
                        # if i == 139:
                        #     print("AM 139: attr is {}".format(prediction_delta))
                        result[:, i] += prediction_delta
                        y = y0
                attr = np.squeeze(
                    (result.copy() / iter_num).reshape(reconstruction_shape).astype(np.float32))
                np.savetxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class), attr)
            else:
                attr = np.loadtxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class)).astype(np.float32)
            # arg_attr = attr.argsort()[::-1][:]
            # shap_attr_trans = np.transpose(shap_attr, (2, 0, 1))
            # acts_squeeze_trans = np.transpose(acts_squeeze, (2, 0, 1))
            '''
            # # Use it for debug the attribution maps
            # sort_AMs_idx_b2s = np.argsort(-attr)
            # sort_AMs_idx_s2b = np.argsort(attr)
            # for i_sort_AMs in range(20):
            #     if i_sort_AMs < 10:
            #         AM_backup_idx = sort_AMs_idx_b2s[i_sort_AMs]
            #         print("big shap value: {:+.3f} in channel {} of all {}"
            #               .format(attr[AM_backup_idx], AM_backup_idx, acts.shape[-1]))
            #         print("  for class: {}, image No:{}"
            #               .format(attr_class, i_sort_AMs))
            #     else:
            #         AM_backup_idx = sort_AMs_idx_s2b[i_sort_AMs-10]
            #         print("small shap value: {:+.3f} in channel {} of all {}"
            #               .format(attr[AM_backup_idx], AM_backup_idx, acts.shape[-1]))
            #         print("  for class: {}, image No:{}"
            #               .format(attr_class, i_sort_AMs-10))
            #     AM_backup = acts[..., AM_backup_idx]
            #     AM_backup = AM_backup.reshape([AM_backup.shape[-1], AM_backup.shape[-1]])
            #     AM_backup = resize(AM_backup, (model.image_shape[0], model.image_shape[1]), order=1,
            #                        mode='constant', anti_aliasing=False)
            #     AM_backup = AM_backup / AM_backup.max() * 255
            #     resize_show(AM_backup, xi=img)
            # kept_channel_idx = np.squeeze(np.argwhere(attr > 0))
            '''
            # Keep channels whose attribution is above the mean of the
            # positive attributions (NaN trick excludes non-positive values
            # from the mean).
            kept_channel_idx = np.squeeze(np.argwhere(attr > np.nanmean(np.where(attr > 0, attr, np.nan))))
            acts_squeeze = np.squeeze(acts)
            attr_temp = np.squeeze(attr).astype(np.float32)
            # print(np.count_nonzero(clear_channel_idx))
            channel_attr_list[i_wanted_layer] = attr_temp
            AM_T[i_wanted_layer] = acts_squeeze[..., kept_channel_idx]
            kept_channel_list[i_wanted_layer] = kept_channel_idx
            # "Reverse" selection: channels more negative than the mean of
            # the negative attributions.
            kept_channel_idx = np.squeeze(np.argwhere(attr < np.nanmean(np.where(attr < 0, attr, np.nan))))
            AM_T_reverse[i_wanted_layer] = acts_squeeze[..., kept_channel_idx]
            kept_channel_list_reverse[i_wanted_layer] = kept_channel_idx
            # test_AM = acts_squeeze * clear_channel_idx * attr_temp
            # test_AM = np.sum(np.transpose(test_AM, (2, 0, 1)), axis=0)
            # acts_squeeze_trans = np.transpose(AM_T[i_wanted_layer], (2, 0, 1))
            # acts_squeeze_trans_sum = np.sum(acts_squeeze_trans, axis=0)
        AM_list = AM_T
        logit_list = sess.run([logit])[0]
        return [AM_list, AM_T_reverse], logit_list, channel_attr_list, \
            [kept_channel_list, kept_channel_list_reverse]
def compute_igsg(img, model, attr_class, layers,
                 flag1, flag_read_attr=True,
                 iter_num=100, SG_path=False,
                 labels=None, save_directory=None):
    """
    Using Aumann Shapley values as feature attributions.

    Implements integrated gradients (an Aumann-Shapley style attribution):
    the gradient of the `attr_class` logit w.r.t. each layer's activations is
    averaged along a straight path from zero to the actual activations, then
    multiplied by the activations and summed spatially to a per-channel value.

    :param img: input image array (H, W, 3) fed through the network
    :param model: lucid modelzoo model; assumed to expose a
        "softmax2_pre_activation" tensor -- NOTE(review): confirm for other
        architectures
    :param attr_class: label name whose logit is attributed
    :param layers: list of layer names to attribute
    :param flag1: tag embedded in the on-disk attribution file names
    :param flag_read_attr: when True, load previously saved attributions
        instead of recomputing them
    :param iter_num: number of interpolation steps along the path
    :param SG_path: if True, jitter each path point with random noise
        (SmoothGrad-style path)
    :param labels: list of label names; `attr_class` must be a member
    :param save_directory: directory holding the attribution txt files
    :return: ([AM_list, AM_T_reverse], logit_list, channel_attr_list,
        [kept_channel_list, kept_channel_list_reverse]) -- same structure as
        compute_shap
    """
    with tf.Graph().as_default(), tf.Session() as sess, gradient_override_map({}):
        # img = tf.image.resize_image_with_crop_or_pad(img, model.image_shape[0], model.image_shape[0])
        # imgnp = sess.run(img)
        # imgnp = imgnp.reshape(224, 224, 3)
        # plt.imsave("./doghead224.jpeg", imgnp)
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        # grads_cam_T = [T(layer) for layer in layers]
        # logit = T("softmax2_pre_activation")[0]
        # logit = T("output2")[0]
        # score = T("output2")[0, labels.index(attr_class)]
        logit = T("softmax2_pre_activation")[0]
        # Scalar logit of the class being attributed; gradients are taken
        # w.r.t. this value.
        logit4grad = T("softmax2_pre_activation")[0, labels.index(attr_class)]
        # Per-layer result slots, filled inside the loop below.
        AM_T = list(range(len(layers)))
        AM_T_reverse = list(range(len(layers)))
        channel_attr_list = list(range(len(layers)))
        kept_channel_list = list(range(len(layers)))
        kept_channel_list_reverse = list(range(len(layers)))
        for i_wanted_layer in range(len(layers)):
            layer = layers[i_wanted_layer]
            acts = T(layer).eval()
            attr = np.zeros(acts.shape[1:])
            t_grad = tf.gradients([logit4grad], [T(layer)])[0]
            if not flag_read_attr:
                # Integrate the gradient along the straight path
                # 0 -> acts in iter_num steps.
                for n in range(iter_num):
                    acts_ = acts * float(n) / iter_num
                    if SG_path:
                        # NOTE(review): noise vector is hard-coded to 528
                        # channels; only valid for layers whose last dim is
                        # 528 -- confirm before using SG_path elsewhere.
                        acts_ *= (np.random.uniform(0, 1, [528]) + np.random.uniform(0, 1, [528])) / 1.5
                    grad = t_grad.eval({T(layer): acts_})
                    attr += grad[0]
                # Average gradient times activation, then sum over the two
                # spatial axes to get one value per channel.
                attr = attr * (1.0 / iter_num) * acts[0]
                attr = np.sum(np.sum(attr, 0), 0)
                np.savetxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class), attr)
            else:
                attr = np.loadtxt(save_directory + "/{}_{}_{}.txt".
                                  format(flag1, layer, attr_class)).astype(np.float32)
            # AM_T[i_wanted_layer] = attr * (attr > 0)
            # Keep channels above the mean of the positive attributions.
            kept_channel_idx = np.squeeze(np.argwhere(attr > np.nanmean(np.where(attr > 0, attr, np.nan))))
            acts_squeeze = np.squeeze(acts)
            attr_temp = np.squeeze(attr).astype(np.float32)
            # print(np.count_nonzero(clear_channel_idx))
            channel_attr_list[i_wanted_layer] = attr_temp
            AM_T[i_wanted_layer] = acts_squeeze[..., kept_channel_idx]
            kept_channel_list[i_wanted_layer] = kept_channel_idx
            # # alltests_Shap/test0_4
            # kept_channel_idx = np.squeeze(np.argwhere(attr < 0))
            # # alltests_Shap/test0_6
            # "Reverse" selection: channels below the mean of the negative
            # attributions.
            kept_channel_idx = np.squeeze(np.argwhere(attr < np.nanmean(np.where(attr < 0, attr, np.nan))))
            AM_T_reverse[i_wanted_layer] = acts_squeeze[..., kept_channel_idx]
            kept_channel_list_reverse[i_wanted_layer] = kept_channel_idx
        AM_list = AM_T
        logit_list = sess.run([logit])[0]
        return [AM_list, AM_T_reverse], logit_list, channel_attr_list, \
            [kept_channel_list, kept_channel_list_reverse]
def compute_all_am_shap(img, model, attr_class, layers,
                        flag1, flag_read_attr=True,
                        iter_num=2 ** 8, labels=None, save_directory=None):
    """
    Using sampling based Shapley method to compute feature attributions.
    Return all attributions and AM not just positive or negative ones.

    Same sampled-Shapley estimate as compute_shap, but no channel filtering
    is applied: the full activation maps and the full per-channel attribution
    vectors are returned.

    :param img: input image array (H, W, 3) fed through the network
    :param model: lucid modelzoo model; assumed to expose a
        "softmax2_pre_activation" tensor -- NOTE(review): confirm
    :param attr_class: label name whose logit is attributed
    :param layers: list of layer names to attribute
    :param flag1: tag embedded in the on-disk attribution file names
    :param flag_read_attr: when True, load saved attributions from disk
    :param iter_num: number of sampled permutations (default 256)
    :param labels: list of label names; `attr_class` must be a member
    :param save_directory: directory holding the attribution txt files
    :return: (AM_list, logit_list, channel_attr_list) -- per-layer squeezed
        activation maps, the final logit vector, and per-layer channel
        attribution vectors
    """
    with tf.Graph().as_default(), tf.Session() as sess, gradient_override_map({}):
        # img = tf.image.resize_image_with_crop_or_pad(img, model.image_shape[0], model.image_shape[0])
        # imgnp = sess.run(img)
        # imgnp = imgnp.reshape(224, 224, 3)
        # plt.imsave("./doghead224.jpeg", imgnp)
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        # grads_cam_T = [T(layer) for layer in layers]
        # logit = T("softmax2_pre_activation")[0]
        # score = T("output2")[0, labels.index(attr_class)]
        logit = T("softmax2_pre_activation")[0]
        AM_T = list(range(len(layers)))
        channel_attr_list = list(range(len(layers)))
        ori_logit = logit.eval()
        # One-hot vector selecting the attr_class entry of the logit vector.
        y_label = np.zeros_like(ori_logit)
        y_label[labels.index(attr_class)] = 1
        # index_class_logit = ori_logit[labels.index(attr_class)]
        # detected_label_index = ori_logit.argmax()
        # print("detected label index: {}, real label index: {}, label name: {}"
        #       .format(detected_label_index, labels.index(attr_class), attr_class))
        for i_wanted_layer in range(len(layers)):
            layer = layers[i_wanted_layer]
            acts = T(layer).eval()
            acts_shape = list(acts.shape)
            # part_name = "import/{}:0".format(layer)
            # t_part_input = tf.placeholder(acts.dtype, acts_shape)
            # T_part = import_part_model(model, t_part_input, part_name)
            # part_logit = T_part("softmax2_pre_activation")[0]
            n_features = acts.shape[-1]
            if not flag_read_attr:
                result = np.zeros((1, n_features))
                run_shape = acts_shape.copy()
                # run_shape = np.delete(run_shape, -1).tolist()
                # run_shape.insert(-1, -1)
                reconstruction_shape = [1, acts_shape[-1]]
                # Sampled Shapley loop: for each random ordering, remove
                # channels one at a time and accumulate the logit drop.
                for r in range(iter_num):
                    p = np.random.permutation(n_features)
                    x = acts.copy().reshape(run_shape)
                    y = None
                    for i in p:
                        if y is None:
                            # Baseline logit before the first removal.
                            y = logit.eval({T(layer): x})
                            # y = model.predict(x.reshape(acts_shape))
                        x[..., i] = 0
                        y0 = logit.eval({T(layer): x})
                        # print("Ori logit score: {}, new logit score: {}"
                        #       .format(index_class_logit, y0[labels.index(attr_class)]))
                        # y0 = model.predict(x.reshape(acts_shape))
                        assert y0.shape == y_label.shape, y0.shape
                        prediction_delta = np.sum((y - y0) * y_label)
                        result[:, i] += prediction_delta
                        y = y0
                attr = np.squeeze(
                    (result.copy() / iter_num).reshape(reconstruction_shape).astype(np.float32))
                np.savetxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class), attr)
            else:
                attr = np.loadtxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class)).astype(np.float32)
            # No channel filtering here: keep the full activation map and the
            # full attribution vector for this layer.
            acts_squeeze = np.squeeze(acts)
            attr_temp = np.squeeze(attr).astype(np.float32)
            channel_attr_list[i_wanted_layer] = attr_temp
            AM_T[i_wanted_layer] = acts_squeeze
        AM_list = AM_T
        logit_list = sess.run([logit])[0]
        return AM_list, logit_list, channel_attr_list
def compute_all_am_igsg(img, model, attr_class, layers,
                        flag1, flag_read_attr=True,
                        iter_num=2 ** 8, SG_path=False,
                        labels=None, save_directory=None):
    """
    Using Aumann Shapley values as feature attributions.
    Return all attributions and AM not just positive or negative ones.

    Same integrated-gradients estimate as compute_igsg, but no channel
    filtering is applied, and the attribution vector is rescaled so that it
    sums to the original class logit.

    :param img: input image array (H, W, 3) fed through the network
    :param model: lucid modelzoo model; assumed to expose a
        "softmax2_pre_activation" tensor -- NOTE(review): confirm
    :param attr_class: label name whose logit is attributed
    :param layers: list of layer names to attribute
    :param flag1: tag embedded in the on-disk attribution file names
    :param flag_read_attr: when True, load saved attributions from disk
    :param iter_num: number of interpolation steps along the path
    :param SG_path: if True, jitter each path point with random noise
    :param labels: list of label names; `attr_class` must be a member
    :param save_directory: directory holding the attribution txt files
    :return: (AM_list, logit_list, channel_attr_list) -- per-layer squeezed
        activation maps, the final logit vector, and per-layer channel
        attribution vectors
    """
    with tf.Graph().as_default(), tf.Session() as sess, gradient_override_map({}):
        # img = tf.image.resize_image_with_crop_or_pad(img, model.image_shape[0], model.image_shape[0])
        # imgnp = sess.run(img)
        # imgnp = imgnp.reshape(224, 224, 3)
        # plt.imsave("./doghead224.jpeg", imgnp)
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        # grads_cam_T = [T(layer) for layer in layers]
        # logit = T("softmax2_pre_activation")[0]
        # score = T("output2")[0, labels.index(attr_class)]
        logit = T("softmax2_pre_activation")[0]
        # Scalar logit of the class being attributed.
        logit4grad = T("softmax2_pre_activation")[0, labels.index(attr_class)]
        AM_T = list(range(len(layers)))
        channel_attr_list = list(range(len(layers)))
        ori_logit = logit.eval()
        # Original value of the attributed class logit; used below to
        # normalize the attribution vector.
        ori_single_logit = logit4grad.eval()
        # y_label mirrors compute_all_am_shap but is not used in this
        # function's IG computation.
        y_label = np.zeros_like(ori_logit)
        y_label[labels.index(attr_class)] = 1
        # index_class_logit = ori_logit[labels.index(attr_class)]
        # detected_label_index = ori_logit.argmax()
        # print("detected label index: {}, real label index: {}, label name: {}"
        #       .format(detected_label_index, labels.index(attr_class), attr_class))
        for i_wanted_layer in range(len(layers)):
            layer = layers[i_wanted_layer]
            acts = T(layer).eval()
            attr = np.zeros(acts.shape[1:])
            t_grad = tf.gradients([logit4grad], [T(layer)])[0]
            if not flag_read_attr:
                # Integrate the gradient along the straight path
                # 0 -> acts in iter_num steps.
                for n in range(iter_num):
                    acts_ = acts * float(n) / iter_num
                    if SG_path:
                        # NOTE(review): noise vector is hard-coded to 528
                        # channels; only valid for layers whose last dim is
                        # 528 -- confirm before using SG_path elsewhere.
                        acts_ *= (np.random.uniform(0, 1, [528]) + np.random.uniform(0, 1, [528])) / 1.5
                    grad = t_grad.eval({T(layer): acts_})
                    attr += grad[0]
                attr = attr * (1.0 / iter_num) * acts[0]
                attr = np.sum(np.sum(attr, 0), 0)
                # Normalize the IG result so it sums to the original class
                # logit (eases debugging of differences between methods).
                # NOTE(review): divides by np.sum(attr); a zero-sum
                # attribution vector would produce inf/nan here.
                attr = attr * ori_single_logit / np.sum(attr)
                np.savetxt(save_directory + "/{}_{}_{}.txt".format(flag1, layer, attr_class), attr)
            else:
                attr = np.loadtxt(save_directory + "/{}_{}_{}.txt".
                                  format(flag1, layer, attr_class)).astype(np.float32)
            # No channel filtering: keep everything for this layer.
            attr_temp = np.squeeze(attr).astype(np.float32)
            channel_attr_list[i_wanted_layer] = attr_temp
            acts_squeeze = np.squeeze(acts)
            AM_T[i_wanted_layer] = acts_squeeze
        AM_list = AM_T
        logit_list = sess.run([logit])[0]
        return AM_list, logit_list, channel_attr_list
| 42.615385
| 111
| 0.629771
| 2,193
| 15,512
| 4.181943
| 0.097583
| 0.033366
| 0.031403
| 0.037073
| 0.903827
| 0.891724
| 0.876895
| 0.873514
| 0.873514
| 0.865773
| 0
| 0.022388
| 0.231176
| 15,512
| 363
| 112
| 42.732782
| 0.746604
| 0.271338
| 0
| 0.937173
| 0
| 0
| 0.024549
| 0.013999
| 0
| 0
| 0
| 0
| 0.010471
| 1
| 0.020942
| false
| 0
| 0.041885
| 0
| 0.08377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8d856bc05540e61179326b029317841542a00d5e
| 134
|
py
|
Python
|
cuchem/cuchem/wf/generative/__init__.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
cuchem/cuchem/wf/generative/__init__.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
cuchem/cuchem/wf/generative/__init__.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
from cuchem.wf.generative.cddd import Cddd as Cddd
from cuchem.wf.generative.megatronmolbart import MegatronMolBART as MegatronMolBART
| 67
| 83
| 0.873134
| 18
| 134
| 6.5
| 0.444444
| 0.17094
| 0.205128
| 0.376068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08209
| 134
| 2
| 83
| 67
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a5fea24c770834f56924b44ab5355614dd1bd962
| 10,794
|
py
|
Python
|
maze_test.py
|
wadiim/maze
|
ff0509ef2cf9b3acf90a8647ba712ba2b7c99aba
|
[
"MIT"
] | 1
|
2020-10-28T05:49:04.000Z
|
2020-10-28T05:49:04.000Z
|
maze_test.py
|
wadiim/maze
|
ff0509ef2cf9b3acf90a8647ba712ba2b7c99aba
|
[
"MIT"
] | null | null | null |
maze_test.py
|
wadiim/maze
|
ff0509ef2cf9b3acf90a8647ba712ba2b7c99aba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from maze import Shape, get_shape, cell_to_string, maze_to_string, solve_maze
import unittest
class GetShapeTest(unittest.TestCase):
    """Tests for get_shape(), which derives a cell's Shape from its
    neighbouring maze entries."""

    def test_inner_cell(self):
        grid = [
            [0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 0],
        ]
        self.assertEqual(get_shape(grid, x=1, y=2),
                         Shape(down=True, right=True))

    def test_outer_cell(self):
        grid = [
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [1, 0, 0, 0],
            [1, 1, 0, 0],
        ]
        self.assertEqual(get_shape(grid, x=3, y=0),
                         Shape(left=True, up=True))

    def test_different_values(self):
        # Note: only the neighbours holding the cell's own value (2) show up
        # in the shape; the differing neighbour values do not.
        grid = [
            [3, 1, 5, 4],
            [2, 2, 2, 8],
            [0, 2, 6, 7],
            [1, 7, 1, 3],
        ]
        self.assertEqual(get_shape(grid, x=1, y=1),
                         Shape(up=True, down=True, right=True))
class CellToStringTest(unittest.TestCase):
    """Tests for cell_to_string(), which renders one maze cell as a
    three-character string of box-drawing glyphs."""

    def _check(self, maze, x, y, expected):
        # Shared assertion helper used by every case below.
        self.assertEqual(cell_to_string(maze, x=x, y=y), expected)

    def test_empty_cell(self):
        self._check([[0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '   ')

    def test_horizontal(self):
        self._check([[0, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '───')

    def test_vertical(self):
        self._check([[0, 0, 0, 0],
                     [1, 1, 1, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' │ ')

    def test_down_and_right(self):
        self._check([[0, 0, 0, 0],
                     [1, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' ┌─')

    def test_down_and_left(self):
        self._check([[0, 1, 0, 0],
                     [1, 1, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┐ ')

    def test_up_and_right(self):
        self._check([[0, 0, 0, 0],
                     [0, 1, 1, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' └─')

    def test_up_and_left(self):
        self._check([[0, 1, 0, 0],
                     [0, 1, 1, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┘ ')

    def test_vertical_and_right(self):
        self._check([[0, 0, 0, 0],
                     [1, 1, 1, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' ├─')

    def test_vertical_and_left(self):
        self._check([[0, 1, 0, 0],
                     [1, 1, 1, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┤ ')

    def test_down_and_horizontal(self):
        self._check([[0, 1, 0, 0],
                     [1, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┬─')

    def test_up_and_horizontal(self):
        self._check([[0, 1, 0, 0],
                     [0, 1, 1, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┴─')

    def test_vertical_and_horizontal(self):
        self._check([[0, 1, 0, 0],
                     [1, 1, 1, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '─┼─')

    def test_up(self):
        self._check([[0, 0, 0, 0],
                     [0, 1, 1, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' │ ')

    def test_down(self):
        self._check([[0, 0, 0, 0],
                     [1, 1, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, ' │ ')

    def test_left(self):
        self._check([[0, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '───')

    def test_right(self):
        self._check([[0, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]], 1, 1, '───')

    def test_top_left_corner(self):
        self._check([[0, 0, 0, 1],
                     [0, 0, 0, 1],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 0, 3, ' ┌─')

    def test_top_right_corner(self):
        self._check([[0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 1, 1]], 3, 3, '─┐ ')

    def test_bottom_left_corner(self):
        self._check([[1, 1, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 0]], 0, 0, ' └─')

    def test_bottom_right_corner(self):
        self._check([[0, 0, 0, 0],
                     [0, 0, 0, 0],
                     [1, 0, 0, 0],
                     [1, 0, 0, 0]], 3, 0, '─┘ ')
class MazeToStringTest(unittest.TestCase):
def test_empty_maze(self):
maze = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
str = (" \n"
" \n"
" \n"
" ")
self.assertEqual(maze_to_string(maze), str)
def test_complex_maze(self):
maze = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
str = (" ┌──── ┌──────────────┬─────┐ \n"
" │ │ │ │ \n"
" ├──── │ ┌──── │ │ \n"
" │ │ │ \n"
" │ ┌────────┴──┐ ───────┤ \n"
" │ │ │ │ \n"
" │ │ │ │ │ │ \n"
" │ │ ├─────┼─────┤ \n"
" │ ┌─────┘ │ │ │ \n"
" │ │ │ \n"
" └─────┴──────────────── ────┘ ")
self.assertEqual(maze_to_string(maze), str)
def test_complex_maze_with_solution(self):
maze = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 2, 2, 2, 2, 2, 1, 0, 1],
[1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2],
[1, 0, 1, 2, 2, 2, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 1, 2, 1, 0, 0, 0, 1],
[1, 2, 2, 2, 2, 2, 1, 1, 1, 0, 1],
[1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 2, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[2, 2, 1, 1, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
str = (" ┌──── │ ┌──────────────┬─────┐ \n"
" │ │ │ │ │ \n"
" ├──── │ │ ┌──── │ │ \n"
" │ ┌──┘ │ │ \n"
" │ │ ┌────────┴──┐ ───────┤ \n"
" │ │ │ ┌─────┐ │ │ \n"
" │ │ │ │ │ │ │ │ │ \n"
" │ └─────┘ │ │ ├─────┼─────┤ \n"
" │ ┌─────┘ │ │ │ │ \n"
" │ │ └────────┐ │ \n"
" └─────┴──────────────── │ ────┘ ")
self.assertEqual(maze_to_string(maze), str)
class SolveMazeTest(unittest.TestCase):
    """Tests for solve_maze(), which marks the solution path in place by
    replacing path cells (0) with the value 2."""

    def _check(self, maze, solution):
        # solve_maze mutates its argument; compare against the expected grid.
        solve_maze(maze)
        self.assertEqual(maze, solution)

    def test_horizontal_single_path(self):
        self._check(
            [[1, 0, 1],
             [1, 0, 1],
             [1, 0, 1],
             [1, 0, 1],
             [1, 0, 1]],
            [[1, 2, 1],
             [1, 2, 1],
             [1, 2, 1],
             [1, 2, 1],
             [1, 2, 1]])

    def test_vertical_single_path(self):
        self._check(
            [[1, 1, 1, 1, 1],
             [0, 0, 0, 0, 0],
             [1, 1, 1, 1, 1]],
            [[1, 1, 1, 1, 1],
             [2, 2, 2, 2, 2],
             [1, 1, 1, 1, 1]])

    def test_curved_path(self):
        self._check(
            [[1, 1, 1, 1, 1],
             [0, 0, 1, 0, 0],
             [1, 0, 1, 0, 1],
             [1, 0, 0, 0, 1],
             [1, 1, 1, 1, 1]],
            [[1, 1, 1, 1, 1],
             [2, 2, 1, 2, 2],
             [1, 2, 1, 2, 1],
             [1, 2, 2, 2, 1],
             [1, 1, 1, 1, 1]])

    def test_multiple_pathes(self):
        # Only the cells on the actual solution are marked; dead ends stay 0.
        self._check(
            [[1, 1, 1, 1, 1],
             [0, 0, 0, 0, 1],
             [1, 0, 1, 1, 1],
             [1, 0, 0, 0, 1],
             [1, 0, 1, 1, 1]],
            [[1, 1, 1, 1, 1],
             [2, 2, 0, 0, 1],
             [1, 2, 1, 1, 1],
             [1, 2, 0, 0, 1],
             [1, 2, 1, 1, 1]])

    def test_complex_maze(self):
        self._check(
            [[1, 1, 1, 1, 1, 1, 1],
             [1, 0, 0, 0, 0, 0, 0],
             [1, 1, 1, 0, 1, 1, 1],
             [1, 0, 1, 0, 0, 0, 1],
             [1, 0, 1, 1, 1, 0, 1],
             [1, 0, 0, 0, 0, 0, 1],
             [1, 0, 1, 1, 1, 1, 1]],
            [[1, 1, 1, 1, 1, 1, 1],
             [1, 0, 0, 2, 2, 2, 2],
             [1, 1, 1, 2, 1, 1, 1],
             [1, 0, 1, 2, 2, 2, 1],
             [1, 0, 1, 1, 1, 2, 1],
             [1, 2, 2, 2, 2, 2, 1],
             [1, 2, 1, 1, 1, 1, 1]])
# Run the whole test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 34.707395
| 77
| 0.313137
| 1,572
| 10,794
| 2.284351
| 0.054707
| 0.173768
| 0.191312
| 0.186021
| 0.802005
| 0.77555
| 0.753551
| 0.710944
| 0.68616
| 0.629351
| 0
| 0.166966
| 0.483417
| 10,794
| 310
| 78
| 34.819355
| 0.413917
| 0.001946
| 0
| 0.5
| 0
| 0
| 0.082444
| 0.008356
| 0
| 0
| 0
| 0
| 0.113971
| 1
| 0.113971
| false
| 0
| 0.007353
| 0
| 0.136029
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
574e5de1f23065be99d681f0425ed5fae7b1fe47
| 111,564
|
py
|
Python
|
scripts/securitygroup/test_regression.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 1
|
2017-06-13T04:42:34.000Z
|
2017-06-13T04:42:34.000Z
|
scripts/securitygroup/test_regression.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
scripts/securitygroup/test_regression.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from tcutils.wrappers import preposttest_wrapper
from vnc_api.vnc_api import NoIdError
from verify import VerifySecGroup
from policy_test import PolicyFixture
from vn_test import MultipleVNFixture
from vm_test import MultipleVMFixture
from base import BaseSGTest
from common.policy.config import ConfigPolicy
from security_group import SecurityGroupFixture,get_secgrp_id_from_name
from vn_test import VNFixture
from vm_test import VMFixture
from tcutils.topo.topo_helper import *
import os
import sys
sys.path.append(os.path.realpath('scripts/flow_tests'))
from tcutils.topo.sdn_topo_setup import *
import test
import sdn_sg_test_topo
from tcutils.tcpdump_utils import *
from time import sleep
from tcutils.util import get_random_name
from base_traffic import *
from tcutils.util import skip_because
import test_regression_basic
class SecurityGroupRegressionTests2(BaseSGTest, VerifySecGroup, ConfigPolicy):
@classmethod
def setUpClass(cls):
    # One-time fixture setup for the whole class; the base classes perform
    # their own setup first.
    super(SecurityGroupRegressionTests2, cls).setUpClass()
    cls.option = 'openstack'  # orchestrator flavour used by the fixtures
def setUp(self):
    # Per-test setup: base setup first, then the shared SG test resources
    # (VNs/VMs/policy/SGs -- created by BaseSGTest.create_sg_test_resources).
    super(SecurityGroupRegressionTests2, self).setUp()
    self.create_sg_test_resources()
def tearDown(self):
    # Per-test cleanup is delegated entirely to the base classes.
    self.logger.debug("Tearing down SecurityGroupRegressionTests2.")
    super(SecurityGroupRegressionTests2, self).tearDown()
def runTest(self):
    # Intentionally empty -- presumably allows instantiating the TestCase
    # without a named test method; verify against the test runner in use.
    pass
@preposttest_wrapper
def test_sec_group_with_proto(self):
    """
    Description: Verify security group with allow specific protocol on all ports and policy with allow all between VN's
    Steps:
        1. create the resources VN,VM,policy,SG
        2. update the SG rules with proto tcp(for sg1) and udp(sg2)
        3. verify if traffic allowed is as per the proto allowed in SG rule
    Pass criteria: step 3 should pass
    """
    self.logger.info("Configure the policy with allow any")
    policy_rules = [
        {
            'direction': '<>',
            'protocol': 'any',
            'source_network': self.vn1_name,
            'src_ports': [0, -1],
            'dest_network': self.vn2_name,
            'dst_ports': [0, -1],
            'simple_action': 'pass',
        },
    ]
    self.config_policy_and_attach_to_vn(policy_rules)

    def _proto_rules(proto):
        # Build the symmetric egress/ingress SG rule pair that allows
        # `proto` on every port between the two test subnets and the
        # local security group.  Fresh structures are returned on each
        # call so the two SGs never share rule objects.
        def subnets():
            return [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                    {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}]

        def all_ports():
            return [{'start_port': 0, 'end_port': -1}]

        return [{'direction': '<>',
                 'protocol': proto,
                 'dst_addresses': subnets(),
                 'dst_ports': all_ports(),
                 'src_ports': all_ports(),
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': proto,
                 'src_addresses': subnets(),
                 'src_ports': all_ports(),
                 'dst_ports': all_ports(),
                 'dst_addresses': [{'security_group': 'local'}],
                 }]

    # sg1 allows only TCP, sg2 allows only UDP.
    self.sg1_fix.replace_rules(_proto_rules('tcp'))
    self.sg2_fix.replace_rules(_proto_rules('udp'))
    self.verify_sec_group_port_proto()
    return True
@preposttest_wrapper
def test_sec_group_with_port(self):
    """
    Description: Verify security group with allow specific protocol/port and policy with allow all between VN's
    Steps:
        1. create the resources VN,VM,policy,SG
        2. update the SG rules with proto tcp(for sg1) and udp(sg2) and open port 8000-9000
        3. verify if traffic allowed is as per the proto/port allowed in SG rule
    Pass criteria: step 3 should pass
    """
    self.logger.info("Configure the policy with allow any")
    policy_rules = [{'direction': '<>',
                     'protocol': 'any',
                     'source_network': self.vn1_name,
                     'src_ports': [0, -1],
                     'dest_network': self.vn2_name,
                     'dst_ports': [0, -1],
                     'simple_action': 'pass'}]
    self.config_policy_and_attach_to_vn(policy_rules)

    test_subnets = [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                    {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}]
    port_range = [{'start_port': 8000, 'end_port': 9000}]
    local_sg = [{'security_group': 'local'}]

    def sg_rule_pair(proto):
        # Symmetric egress/ingress rules for the given protocol, scoped to
        # the two test subnets and restricted to ports 8000-9000.
        egress = {'direction': '<>',
                  'protocol': proto,
                  'dst_addresses': test_subnets,
                  'dst_ports': port_range,
                  'src_ports': port_range,
                  'src_addresses': local_sg}
        ingress = {'direction': '<>',
                   'protocol': proto,
                   'src_addresses': test_subnets,
                   'src_ports': port_range,
                   'dst_ports': port_range,
                   'dst_addresses': local_sg}
        return [egress, ingress]

    # sg1 admits only TCP on 8000-9000, sg2 only UDP on the same range;
    # the allow-any policy leaves filtering entirely to the SGs.
    self.sg1_fix.replace_rules(sg_rule_pair('tcp'))
    self.sg2_fix.replace_rules(sg_rule_pair('udp'))
    self.verify_sec_group_port_proto(port_test=True)
    return True
#end class SecurityGroupRegressionTests2
class SecurityGroupRegressionTests3(BaseSGTest, VerifySecGroup, ConfigPolicy):
    """SG/policy interaction: SG protocol filters combined with TCP-only policies."""

    @classmethod
    def setUpClass(cls):
        super(SecurityGroupRegressionTests3, cls).setUpClass()
        cls.option = 'openstack'

    def setUp(self):
        super(SecurityGroupRegressionTests3, self).setUp()
        # Creates the VNs, VMs and the sg1/sg2 security-group fixtures
        # referenced by the tests below.
        self.create_sg_test_resources()

    def tearDown(self):
        self.logger.debug("Tearing down SecurityGroupRegressionTests3.")
        super(SecurityGroupRegressionTests3, self).tearDown()

    def runTest(self):
        pass

    def _subnet_proto_rules(self, proto):
        """Return the symmetric SG rule pair (egress + ingress) for *proto*,
        scoped to the two test subnets, all ports."""
        subnets = [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}]
        any_port = [{'start_port': 0, 'end_port': -1}]
        local_sg = [{'security_group': 'local'}]
        return [{'direction': '<>',
                 'protocol': proto,
                 'dst_addresses': subnets,
                 'dst_ports': any_port,
                 'src_ports': any_port,
                 'src_addresses': local_sg},
                {'direction': '<>',
                 'protocol': proto,
                 'src_addresses': subnets,
                 'src_ports': any_port,
                 'dst_ports': any_port,
                 'dst_addresses': local_sg}]

    @preposttest_wrapper
    def test_sec_group_with_proto_and_policy_to_allow_only_tcp(self):
        """
        Description: Verify security group with allow specific protocol on all ports and policy with allow only TCP between VN's
        Steps:
            1. create the resources VN,VM,policy,SG
            2. update the SG rules with proto tcp(for sg1) and udp(sg2)
            3. verify if traffic allowed is as per the proto allowed in SG rule and policy
        Pass criteria: step 3 should pass
        """
        self.logger.info("Configure the policy with allow TCP only rule.")
        policy_rules = [{'direction': '<>',
                         'protocol': 'tcp',
                         'source_network': self.vn1_name,
                         'src_ports': [0, -1],
                         'dest_network': self.vn2_name,
                         'dst_ports': [0, -1],
                         'simple_action': 'pass'}]
        self.config_policy_and_attach_to_vn(policy_rules)
        # sg1 admits only TCP, sg2 only UDP; the inter-VN policy admits
        # only TCP, so traffic flows only where SG and policy agree.
        self.sg1_fix.replace_rules(self._subnet_proto_rules('tcp'))
        self.sg2_fix.replace_rules(self._subnet_proto_rules('udp'))
        self.verify_sec_group_with_udp_and_policy_with_tcp()
        return True

    @preposttest_wrapper
    def test_sec_group_with_proto_and_policy_to_allow_only_tcp_ports(self):
        """
        Description: Verify security group with allow specific protocol on all ports and policy with allow only TCP on specific ports between VN's
        Steps:
            1. create the resources VN,VM,policy,SG
            2. update the SG rules with proto tcp(for sg1) and udp(sg2)
            3. verify if traffic allowed is as per the proto allowed in SG rule and port in policy
        Pass criteria: step 3 should pass
        """
        self.logger.info(
            "Configure the policy with allow TCP port 8000/9000 only rule.")
        policy_rules = [{'direction': '<>',
                         'protocol': 'tcp',
                         'source_network': self.vn1_name,
                         'src_ports': [8000, 8000],
                         'dest_network': self.vn2_name,
                         'dst_ports': [9000, 9000],
                         'simple_action': 'pass'}]
        self.config_policy_and_attach_to_vn(policy_rules)
        # SGs are port-agnostic here (all ports); the policy alone narrows
        # permitted TCP traffic to src 8000 / dst 9000.
        self.sg1_fix.replace_rules(self._subnet_proto_rules('tcp'))
        self.sg2_fix.replace_rules(self._subnet_proto_rules('udp'))
        self.verify_sec_group_with_udp_and_policy_with_tcp_port()
        return True
#end class SecurityGroupRegressionTests3
class SecurityGroupRegressionTests4(BaseSGTest, VerifySecGroup, ConfigPolicy):
    """Topology-driven SG tests spanning VNs, computes, and SG combinations."""

    @classmethod
    def setUpClass(cls):
        super(SecurityGroupRegressionTests4, cls).setUpClass()
        cls.option = 'openstack'

    def runTest(self):
        pass

    @preposttest_wrapper
    @skip_because(feature='multi-subnet')
    def test_vn_compute_sg_comb(self):
        """
        Description: Verify traffic between intra/inter VN,intra/inter compute and same/diff default/user-define SG
        Steps:
            1. define the topology for intra/inter VN,intra/inter compute and same/diff default/user-define SG
            2. create the resources as defined in the topo
            3. verify the traffic
        Pass criteria: step 3 should pass
        """
        topo_class = sdn_sg_test_topo.sdn_4vn_xvm_config
        self.logger.info("Scenario for the test used is: %s" % (topo_class))
        computes = self.connections.orch.get_hosts()
        try:
            # Project-scoped topology; project credentials are provided by
            # the wrapper module when run in a parallel test env.
            topo = topo_class(project=self.project.project_name,
                              username=self.project.username,
                              password=self.project.password,
                              compute_node_list=computes,
                              config_option=self.option)
        except (AttributeError, NameError):
            # No project context available: fall back to defaults.
            topo = topo_class(compute_node_list=computes,
                              config_option=self.option)
        # topo_setup returns {'result': ..., 'msg': ..., 'data': [topo, config_topo]}
        # where config_topo maps 'policy'/'vn'/'vm' names to their fixtures.
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map,
                                   config_option=self.option)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
            self.start_traffic_and_verify_negative_cases(topo_obj, config_topo)
        return True
    #end test_vn_compute_sg_comb
#end class SecurityGroupRegressionTests4
class SecurityGroupRegressionTests5(BaseSGTest, VerifySecGroup, ConfigPolicy):
    """SG regression tests: combined tcp+udp rule sets and default-SG behavior."""

    @classmethod
    def setUpClass(cls):
        # Shared BaseSGTest setup; 'openstack' selects the orchestrator
        # flavor used by the topology/config helpers.
        super(SecurityGroupRegressionTests5, cls).setUpClass()
        cls.option = 'openstack'

    def setUp(self):
        super(SecurityGroupRegressionTests5, self).setUp()
        # Creates the VNs, VMs and the sg1/sg2 security-group fixtures.
        self.create_sg_test_resources()

    def tearDown(self):
        # Bug fix: the message previously said "...Tests2" (copy-paste from
        # an earlier class); it now names this class.
        self.logger.debug("Tearing down SecurityGroupRegressionTests5.")
        super(SecurityGroupRegressionTests5, self).tearDown()

    def runTest(self):
        # No-op: test methods are invoked individually.
        pass

    @preposttest_wrapper
    def test_sec_group_with_proto_double_rules_sg1(self):
        """
        Description: Verify security group with allow tcp/udp protocol on all ports and policy with allow all between VN's
        Steps:
            1. create the resources VN,VM,policy,SG
            2. update the SG rules with proto tcp/udp
            3. verify if traffic allowed is as per the proto allowed in SG rule
        Pass criteria: step 3 should pass
        """
        self.logger.info("Configure the policy with allow any")
        rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': self.vn1_name,
                'src_ports': [0, -1],
                'dest_network': self.vn2_name,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]
        self.config_policy_and_attach_to_vn(rules)
        # sg1 carries four rules: an egress+ingress pair for TCP and an
        # egress+ingress pair for UDP, all ports, both test subnets.
        rule = [{'direction': '<>',
                 'protocol': 'tcp',
                 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'tcp',
                 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'udp',
                 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'udp',
                 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_addresses': [{'security_group': 'local'}],
                 }]
        self.sg1_fix.replace_rules(rule)
        # sg2 allows only UDP.
        rule = [{'direction': '<>',
                 'protocol': 'udp',
                 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'udp',
                 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_addresses': [{'security_group': 'local'}],
                 }]
        self.sg2_fix.replace_rules(rule)
        self.verify_sec_group_port_proto(double_rule=True)
        return True
    #end test_sec_group_with_proto_double_rules_sg1

    @preposttest_wrapper
    def test_default_sg(self):
        """
        Description: test default security group
        Steps:
            1. try to delete default sg, should fail
            2. add/delete rules and verify the rules with traffic
        Pass criteria: step 1 and 2 should pass
        """
        self.logger.info("Configure the policy with allow any")
        rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': self.vn1_name,
                'src_ports': [0, -1],
                'dest_network': self.vn2_name,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]
        self.config_policy_and_attach_to_vn(rules)
        # Step 1: deleting the project's default SG must not succeed.
        secgrp_fq_name = ':'.join(['default-domain',
                                   self.inputs.project_name,
                                   'default'])
        sg_id = get_secgrp_id_from_name(
            self.connections,
            secgrp_fq_name)
        try:
            self.orch.delete_security_group(sg_id)
        except Exception as msg:  # fixed: Py2-only "except E, v" syntax
            self.logger.info(msg)
            self.logger.info(
                "Not able to delete the default security group as expected")
        else:
            # The delete API may return without raising; confirm the default
            # SG still exists. If it is really gone, fail the test.
            # NOTE(review): fq_name is passed as a ':'-joined string; vnc_lib
            # reads usually take a list — confirm against the client API.
            try:
                secgroup = self.vnc_lib.security_group_read(
                    fq_name=secgrp_fq_name)
                self.logger.info(
                    "Not able to delete the default security group as expected")
            except NoIdError:
                errmsg = "default Security group deleted"
                self.logger.error(errmsg)
                assert False, errmsg
        # Step 2: replace the default SG's rules with UDP-only rules and
        # verify with traffic.
        self.sg1_fix.delete_all_rules(sg_id)
        rule = [{'direction': '<>',
                 'protocol': 'udp',
                 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'udp',
                 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}},
                                   {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_addresses': [{'security_group': 'local'}],
                 }]
        secgrp_rules = self.sg1_fix.create_sg_rule(sg_id, secgrp_rules=rule)
        assert secgrp_rules
        sender = (self.vm1_fix, self.sg2_fix.secgrp_name)
        receiver = (self.vm6_fix, 'default')
        self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'pass')
        # Restore the stock default-SG rules (allow all egress; allow all
        # ingress from members of the default SG itself).
        self.sg1_fix.delete_all_rules(sg_id)
        rule = [{'direction': '<>',
                 'protocol': 'any',
                 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'src_addresses': [{'security_group': 'local'}],
                 },
                {'direction': '<>',
                 'protocol': 'any',
                 'src_addresses': [{'security_group': secgrp_fq_name}],
                 'src_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_ports': [{'start_port': 0, 'end_port': -1}],
                 'dst_addresses': [{'security_group': 'local'}],
                 }]
        secgrp_rules = self.sg1_fix.create_sg_rule(sg_id, secgrp_rules=rule)
        assert secgrp_rules
        return True
    #end test_default_sg
#end class SecurityGroupRegressionTests5
class SecurityGroupRegressionTests6(BaseSGTest, VerifySecGroup, ConfigPolicy):
    """Topology-driven SG tests: statefulness, multi-project, and no-rule SGs."""

    @classmethod
    def setUpClass(cls):
        super(SecurityGroupRegressionTests6, cls).setUpClass()
        cls.option = 'openstack'

    def runTest(self):
        pass

    @preposttest_wrapper
    @skip_because(feature='multi-subnet')
    def test_sg_stateful(self):
        """
        Description: Test if SG is stateful
            1. test if inbound traffic without allowed ingress rule is allowed
            2. Test if outbound traffic without allowed egress rule is allowed
            3. test traffic between SG with only ingress/egress rule
        Steps:
            1. define the topology for the test
            2. create the resources as defined in the topo
            3. verify the traffic
        Pass criteria: step 3 should pass
        """
        topo_class = sdn_sg_test_topo.sdn_topo_config
        self.logger.info("Scenario for the test used is: %s" % (topo_class))
        topo = topo_class()
        try:
            # Project credentials are provided by the wrapper module when
            # run in a parallel test env.
            topo.build_topo_sg_stateful(project=self.project.project_name,
                                        username=self.project.username,
                                        password=self.project.password,
                                        config_option=self.option)
        except (AttributeError, NameError):
            topo.build_topo_sg_stateful(config_option=self.option)
        # topo_setup returns {'result': ..., 'msg': ..., 'data': [topo, config_topo]}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(config_option=self.option)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
            self.start_traffic_and_verify(topo_obj, config_topo,
                                          traffic_reverse=False)
        return True
    #end test_sg_stateful

    @preposttest_wrapper
    @skip_because(feature='multi-tenant')
    def test_sg_multiproject(self):
        """
        Description: Test SG across projects
        Steps:
            1. define the topology for the test
            2. create the resources as defined in the topo
            3. verify the traffic
        Pass criteria: step 3 should pass
        """
        topo_class = sdn_sg_test_topo.sdn_topo_config_multiproject
        self.logger.info("Scenario for the test used is: %s" % (topo_class))
        topo = topo_class()
        # NOTE(review): stored on the instance; presumably consumed by
        # teardown/reporting hooks — confirm before removing.
        self.topo = topo
        # Multi-project setup path; returns an extra vm_fip_info element.
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup(config_option=self.option)
        self.assertEqual(out['result'], True, out['msg'])
        if out['result'] == True:
            topo_objs, config_topo, vm_fip_info = out['data']
            self.start_traffic_and_verify_multiproject(topo_objs, config_topo,
                                                       traffic_reverse=False)
        return True
    #end test_sg_multiproject

    @preposttest_wrapper
    @skip_because(feature='multi-subnet')
    def test_sg_no_rule(self):
        """
        Description: Test SG without any rule, it should deny all traffic
        Steps:
            1. define the topology for the test
            2. create the resources as defined in the topo
            3. verify the traffic denied
        Pass criteria: step 3 should pass
        """
        topo_class = sdn_sg_test_topo.sdn_topo_1vn_2vm_config
        self.logger.info("Scenario for the test used is: %s" % (topo_class))
        topo = topo_class()
        try:
            # Project credentials are provided by the wrapper module when
            # run in a parallel test env.
            topo.build_topo(project=self.project.project_name,
                            username=self.project.username,
                            password=self.project.password,
                            config_option=self.option)
        except (AttributeError, NameError):
            topo.build_topo(config_option=self.option)
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(config_option=self.option)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
            # traffic_reverse=True: expect the traffic to be denied.
            self.start_traffic_and_verify(topo_obj, config_topo,
                                          traffic_reverse=True)
        return True
    #end test_sg_no_rule
#end class SecurityGroupRegressionTests6
class SecurityGroupRegressionTests7(BaseSGTest, VerifySecGroup, ConfigPolicy):
@classmethod
def setUpClass(cls):
    # Run the shared BaseSGTest class setup, then pin the orchestrator
    # option consumed by the topology/config helpers to OpenStack.
    super(SecurityGroupRegressionTests7, cls).setUpClass()
    cls.option = 'openstack'
def runTest(self):
    # Explicit no-op: tests in this class are driven individually via the
    # preposttest_wrapper-decorated methods, not through runTest.
    pass
@preposttest_wrapper
def test_icmp_error_handling1(self):
    """
    Description: Test ICMP error handling
        1. ingress-udp from same SG, egress-all
        2. Test with SG rule, ingress-egress-udp only
        3. Test with SG rule, ingress-egress-all
    Steps:
        1. define the topology for the test
        2. create the resources as defined in the topo
        3. verify the traffic for each of the cases mentioned in description
    Pass criteria: step 3 should pass
    """
    topology_class_name = None
    #
    # Get config for test from topology
    result = True
    msg = []
    if not topology_class_name:
        topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
    self.logger.info("Scenario for the test used is: %s" %
                     (topology_class_name))
    topo = topology_class_name()
    try:
        # provided by wrapper module if run in parallel test env
        topo.build_topo(
            project=self.project.project_name,
            username=self.project.username,
            password=self.project.password, config_option=self.option)
    except (AttributeError, NameError):
        # No project context available: build with defaults.
        topo.build_topo(config_option=self.option)
    #
    # Test setup: Configure policy, VN, & VM
    # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
    # Returned topo is of following format:
    # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
    setup_obj = self.useFixture(
        sdnTopoSetupFixture(self.connections, topo))
    out = setup_obj.topo_setup(config_option=self.option)
    self.logger.info("Setup completed with result %s" % (out['result']))
    self.assertEqual(out['result'], True, out['msg'])
    # assertEqual above aborts on failure, so this branch always executes
    # when reached.
    if out['result']:
        topo_obj, config_topo = out['data']
    # Case 1: SG rule, ingress-udp same SG, egress-all.
    port = 10000
    pkt_cnt = 10
    src_vm_name = 'vm1'
    dst_vm_name = 'vm3'
    src_vm_fix = config_topo['vm'][src_vm_name]
    dst_vm_fix = config_topo['vm'][dst_vm_name]
    src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
    if self.option == 'openstack':
        src_vn_fq_name = src_vn_fix.vn_fq_name
    else:
        src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
    # Start tcpdump on the source VM, capturing only ICMP type 3 code 3
    # (port unreachable) sent from the destination back to the source.
    filters = '\'(icmp[0]=3 and icmp[1]=3 and src host %s and dst host %s)\'' % (dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
    # Start one-way UDP traffic (no receiver listening, so the destination
    # should emit port-unreachable errors).
    sender, receiver = self.start_traffic_scapy(src_vm_fix, dst_vm_fix, 'udp',
                                                port, port, recvr=False)
    # Verify the ICMP-error packet count, then stop tcpdump and traffic.
    assert verify_tcpdump_count(self, session, pcap)
    sent, recv = self.stop_traffic_scapy(sender, receiver, recvr=False)
    # Case 2: SG rule, ingress-egress-udp only — the ICMP error must still
    # be delivered because it belongs to the existing UDP flow.
    rule = [{'direction': '>',
             'protocol': 'udp',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'udp',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][topo_obj.sg_list[0]].replace_rules(rule)
    # Repeat capture + traffic + verification with the UDP-only rules.
    filters = '\'(icmp[0]=3 and icmp[1]=3 and src host %s and dst host %s)\'' % (dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
    sender, receiver = self.start_traffic_scapy(src_vm_fix, dst_vm_fix, 'udp',
                                                port, port, recvr=False)
    assert verify_tcpdump_count(self, session, pcap)
    sent, recv = self.stop_traffic_scapy(sender, receiver, recvr=False)
    # Case 3: SG rule, ingress-egress-all, against a different destination VM.
    dst_vm_fix = config_topo['vm']['vm2']
    rule = [{'direction': '>',
             'protocol': 'any',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'any',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][topo_obj.sg_list[0]].replace_rules(rule)
    # Repeat capture + traffic + verification with the allow-all rules.
    filters = '\'(icmp[0]=3 and icmp[1]=3 and src host %s and dst host %s)\'' % (dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
    sender, receiver = self.start_traffic_scapy(src_vm_fix, dst_vm_fix, 'udp',
                                                port, port, recvr=False)
    assert verify_tcpdump_count(self, session, pcap)
    sent, recv = self.stop_traffic_scapy(sender, receiver, recvr=False)
    return True
#end test_icmp_error_handling1
@preposttest_wrapper
def test_icmp_error_handling2(self):
    """
    Description:
        1. Test ICMP error handling with SG rules egress-udp only
        2. Test ICMP error from agent
    Steps:
        1. define the topology for the test
        2. create the resources as defined in the topo
        3. verify the traffic for each of the cases mentioned in description
    Pass criteria: step 3 should pass
    """
    topology_class_name = None
    #
    # Get config for test from topology
    result = True
    msg = []
    if not topology_class_name:
        topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
    self.logger.info("Scenario for the test used is: %s" %
                     (topology_class_name))
    topo = topology_class_name()
    try:
        # provided by wrapper module if run in parallel test env
        topo.build_topo2(
            project=self.project.project_name,
            username=self.project.username,
            password=self.project.password,
            compute_node_list=self.connections.orch.get_hosts(), config_option=self.option)
    except (AttributeError, NameError):
        topo.build_topo2(compute_node_list=self.connections.orch.get_hosts(), config_option=self.option)
    #
    # Test setup: Configure policy, VN, & VM
    # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
    # Returned topo is of following format:
    # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
    setup_obj = self.useFixture(
        sdnTopoSetupFixture(self.connections, topo))
    out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map, config_option=self.option)
    self.logger.info("Setup completed with result %s" % (out['result']))
    self.assertEqual(out['result'], True, out['msg'])
    # assertEqual above aborts on failure, so this branch always executes
    # when reached.
    if out['result']:
        topo_obj, config_topo = out['data']
    # Case 1: SG rule, egress-udp only; ICMP errors for the UDP flow must
    # still reach the sender.
    port = 10000
    pkt_cnt = 10
    src_vm_name = 'vm1'
    dst_vm_name = 'vm2'
    src_vm_fix = config_topo['vm'][src_vm_name]
    dst_vm_fix = config_topo['vm'][dst_vm_name]
    src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
    if self.option == 'openstack':
        src_vn_fq_name = src_vn_fix.vn_fq_name
    else:
        src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
    # Capture ICMP type 3 code 3 (port unreachable) from dst back to src.
    filters = '\'(icmp[0]=3 and icmp[1]=3 and src host %s and dst host %s)\'' % (dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
    # One-way UDP traffic (no receiver), then verify the error count.
    sender, receiver = self.start_traffic_scapy(src_vm_fix, dst_vm_fix, 'udp',
                                                port, port, recvr=False)
    assert verify_tcpdump_count(self, session, pcap)
    sent, recv = self.stop_traffic_scapy(sender, receiver, recvr=False)
    # Case 2: ICMP error generated by the vrouter agent; needs two computes.
    if len(self.connections.orch.get_hosts()) < 2:
        self.logger.info("Skipping second case(Test ICMP error from agent), \
            this test needs atleast 2 compute nodes")
        raise self.skipTest("Skipping second case(Test ICMP error from agent), \
            this test needs atleast 2 compute nodes")
        # NOTE(review): unreachable — skipTest raises, so this return never
        # executes; kept as-is.
        return True
    rule = [{'direction': '>',
             'protocol': 'icmp',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'icmp',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][topo_obj.sg_list[0]].replace_rules(rule)
    # Bring up a dual-stack (v6 subnets) VN pair and a VM in each for the
    # MTU / fragmentation-needed checks below.
    vn1_name = "test_vnv6sr"
    vn1_net = ['2001::101:0/120']
    #vn1_fixture = self.config_vn(vn1_name, vn1_net)
    vn1_fixture = self.useFixture(VNFixture(
        project_name=self.inputs.project_name, connections=self.connections,
        vn_name=vn1_name, inputs=self.inputs, subnets=vn1_net))
    assert vn1_fixture.verify_on_setup()
    vn2_name = "test_vnv6dn"
    vn2_net = ['2001::201:0/120']
    #vn2_fixture = self.config_vn(vn2_name, vn2_net)
    vn2_fixture = self.useFixture(VNFixture(
        project_name=self.inputs.project_name, connections=self.connections,
        vn_name=vn2_name, inputs=self.inputs, subnets=vn2_net))
    assert vn2_fixture.verify_on_setup()
    vm1_name = 'source_vm'
    vm2_name = 'dest_vm'
    #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
    #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
    self.inputs.set_af('dual')
    vm1_fixture = self.useFixture(VMFixture(
        project_name=self.inputs.project_name, connections=self.connections,
        vn_obj=vn1_fixture.obj, vm_name=vm1_name, node_name=None,
        image_name='ubuntu-traffic', flavor='contrail_flavor_small'))
    vm2_fixture = self.useFixture(VMFixture(
        project_name=self.inputs.project_name, connections=self.connections,
        vn_obj=vn2_fixture.obj, vm_name=vm2_name, node_name=None,
        image_name='ubuntu-traffic', flavor='contrail_flavor_small'))
    assert vm1_fixture.verify_on_setup()
    assert vm2_fixture.verify_on_setup()
    vm1_fixture.wait_till_vm_is_up()
    vm2_fixture.wait_till_vm_is_up()
    # Allow-all policy between the two new VNs.
    rule = [
        {
            'direction': '<>',
            'protocol': 'any',
            'source_network': vn1_name,
            'src_ports': [0, -1],
            'dest_network': vn2_name,
            'dst_ports': [0, -1],
            'simple_action': 'pass',
        },
    ]
    policy_name = 'allow_all'
    policy_fixture = self.config_policy(policy_name, rule)
    vn1_policy_fix = self.attach_policy_to_vn(
        policy_fixture, vn1_fixture)
    vn2_policy_fix = self.attach_policy_to_vn(
        policy_fixture, vn2_fixture)
    if self.option == 'openstack':
        src_vn_fq_name = src_vn_fix.vn_fq_name
    else:
        src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
    # IPv4 path-MTU check: ping with DF and a payload bigger than the path
    # MTU; the agent should answer with ICMP type 3 code 4 (need to frag).
    self.logger.info("Increasing MTU on src VM and ping with bigger size and reverting MTU")
    cmd_ping = ('ping -M want -s 2500 -c 10 %s | grep \"Frag needed and DF set\"' %
                (dst_vm_fix.vm_ip))
    # cmd_tcpdump = 'tcpdump -vvv -c 5 -ni eth0 -v icmp > /tmp/op1.log'
    # Extract the VM's IPv4 default-gateway address from its routing table.
    output = src_vm_fix.run_cmd_on_vm(cmds=['''netstat -anr |grep ^0.0.0.0 | awk '{ print $2 }' '''], as_sudo=True)
    gw = output.values()[0].split('\r\n')[-1]
    filters = 'icmp'
    session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
    cmds = ['ifconfig eth0 mtu 3000', cmd_ping,
            'ifconfig eth0 mtu 1500']
    output = src_vm_fix.run_cmd_on_vm(cmds=cmds, as_sudo=True, as_daemon=True)
    cmd = 'tcpdump -r %s' % pcap
    cmd_check_icmp, err = execute_cmd_out(session, cmd, self.logger)
    cmd_df = re.search('need to frag', cmd_check_icmp)
    self.logger.debug("output for ping cmd: %s" % output[cmd_ping])
    cmd_next_icmp = re.search('.+ seq 2, length (\d\d\d\d).*', cmd_check_icmp)
    icmpmatch = ("%s > %s: ICMP %s unreachable - need to frag" %
                 (gw, src_vm_fix.vm_ip, dst_vm_fix.vm_ip))
    # NOTE(review): group(1) < '1500' is a lexicographic string comparison;
    # it works here only because the regex pins exactly four digits.
    if not ((icmpmatch in cmd_check_icmp) and ("need to frag" in cmd_df.group(0))
            and (cmd_next_icmp.group(1) < '1500')
            and ("Frag needed and DF set" in output[cmd_ping])):
        self.logger.error("expected ICMP error for type 3 code 4 not found")
        stop_tcpdump_for_vm_intf(self, session, pcap)
        return False
    stop_tcpdump_for_vm_intf(self, session, pcap)
    # IPv6 path-MTU check: oversized ping6 should trigger ICMPv6 type 2
    # (packet too big).
    self.logger.info("increasing MTU on src VM and ping6 with bigger size and reverting MTU")
    cmd_ping = 'ping6 -s 2500 -c 10 %s | grep \"Packet too big\"' % (vm2_fixture.vm_ip)
    src_vn_fq_name = vn1_fixture.vn_fq_name
    # NOTE(review): derives the v6 gateway by replacing the last hextet of
    # the VM address with '1' — assumes subnet gateway is ::1; confirm.
    gw = vm1_fixture.vm_ip
    gw = gw.split(':')
    gw[-1] = '1'
    gw = ':'.join(gw)
    filters = 'icmp6'
    session, pcap = start_tcpdump_for_vm_intf(self, vm1_fixture, src_vn_fq_name, filters = filters)
    cmds = ['ifconfig eth0 mtu 3000', cmd_ping,
            'ifconfig eth0 mtu 1500']
    output = vm1_fixture.run_cmd_on_vm(cmds=cmds, as_sudo=True, as_daemon=True)
    cmd = 'tcpdump -r %s' % pcap
    cmd_check_icmp, err = execute_cmd_out(session, cmd, self.logger)
    self.logger.debug("output for ping cmd: %s" % output[cmd_ping])
    cmd_next_icmp = re.search('.+ ICMP6, packet too big, mtu (\d\d\d\d).*', cmd_check_icmp)
    icmpmatch = ("ICMP6, packet too big")
    if not ((icmpmatch in cmd_check_icmp) and (cmd_next_icmp.group(1) < '1500')
            and ("Packet too big" in output[cmd_ping])):
        self.logger.error("expected ICMP6 error for type 2 packet too big message not found")
        stop_tcpdump_for_vm_intf(self, session, pcap)
        # output = vm1_fixture.run_cmd_on_vm(cmds='rm /tmp/op.log', as_sudo=True)
        return False
    stop_tcpdump_for_vm_intf(self, session, pcap)
    return True
#end test_icmp_error_handling2
@preposttest_wrapper
@skip_because(feature='service-instance')
def test_icmp_error_handling_from_mx_with_si(self):
"""
Description: Test ICMP error handling from MX with SI in the middle
1. uses traceroute util on the VM
Steps:
1. define the topology for the test
2. create the resources as defined in the topo
3. copy the traceroute pkg to VM and install
4. run the traceroute to 8.8.8.8
5. verify through tcpdump if icmp error recvd on VM
Pass criteria: step 5 should pass
"""
if ('MX_GW_TEST' not in os.environ) or (('MX_GW_TEST' in os.environ) and (os.environ.get('MX_GW_TEST') != '1')):
self.logger.info(
"Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test")
raise self.skipTest(
"Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test")
return True
public_vn_info = {'subnet':[self.inputs.fip_pool], 'router_asn':self.inputs.router_asn, 'rt_number':self.inputs.mx_rt}
topology_class_name = None
#
# Get config for test from topology
result = True
msg = []
if not topology_class_name:
topology_class_name = sdn_sg_test_topo.sdn_topo_mx_with_si
self.logger.info("Scenario for the test used is: %s" %
(topology_class_name))
topo = topology_class_name()
try:
# provided by wrapper module if run in parallel test env
topo.build_topo(
project=self.project.project_name,
username=self.project.username,
password=self.project.password,
public_vn_info=public_vn_info,config_option=self.option)
except (AttributeError,NameError):
topo.build_topo(public_vn_info=public_vn_info,config_option=self.option)
#
# Test setup: Configure policy, VN, & VM
# return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
# Returned topo is of following format:
# config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
setup_obj = self.useFixture(
sdnTopoSetupFixture(self.connections, topo))
out = setup_obj.topo_setup(skip_verify='no',config_option=self.option)
self.logger.info("Setup completed with result %s" % (out['result']))
self.assertEqual(out['result'], True, out['msg'])
if out['result']:
topo_obj, config_topo = out['data']
pol_fix = config_topo['policy'][topo_obj.policy_list[0]]
if self.option == 'openstack':
policy_id = pol_fix.policy_obj['policy']['id']
new_policy_entries = config_topo['policy'][topo_obj.policy_list[1]].policy_obj['policy']['entries']
data = {'policy': {'entries': new_policy_entries}}
pol_fix.update_policy(policy_id, data)
else:
policy_name = topo_obj.policy_list[0]
proj_obj = pol_fix._conn_drv.project_read(['default-domain',self.project.project_name])
new_policy_entries = pol_fix._conn_drv.network_policy_read(['default-domain',
self.project.project_name,
topo_obj.policy_list[1]]).network_policy_entries
net_policy_obj = NetworkPolicy(
policy_name, network_policy_entries=new_policy_entries,
parent_obj=proj_obj)
pol_fix._conn_drv.network_policy_update(net_policy_obj)
src_vm_name = 'vm2'
src_vm_fix = config_topo['vm'][src_vm_name]
src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
if self.option == 'openstack':
src_vn_fq_name = src_vn_fix.vn_fq_name
else:
src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
pkg = 'traceroute_2.0.18-1_amd64.deb'
self.logger.info("copying traceroute pkg to the compute node.")
path = os.getcwd() + '/tcutils/pkgs/' + pkg
host_compute = {'username': self.inputs.username, 'password': self.inputs.password, 'ip': src_vm_fix.vm_node_ip}
copy_file_to_server(host_compute,path, '/tmp',pkg)
self.logger.info("copying traceroute from compute node to VM")
with settings(host_string='%s@%s' % (self.inputs.username, src_vm_fix.vm_node_ip),
password=self.inputs.password, warn_only=True, abort_on_prompts=False):
path = '/tmp/' + pkg
output = fab_put_file_to_vm(
host_string='%s@%s' %
(src_vm_fix.vm_username,
src_vm_fix.local_ip),
password=src_vm_fix.vm_password,
src=path,
dest='/tmp')
self.logger.info("installing traceroute")
cmd = 'dpkg -i /tmp/' + pkg
output_cmd_dict = src_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True)
assert "Setting up traceroute" in output_cmd_dict[cmd], "traceroute pkg installation error, output:%s" % output_cmd_dict[cmd]
self.logger.info("starting tcpdump on src VM")
filters = '\'(icmp[0]=11 and icmp[1]=0)\''
session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
self.logger.info("starting traceroute to out of cluster, 8.8.8.8")
cmd = 'traceroute 8.8.8.8'
for i in range(0,4):
output_cmd_dict = src_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True)
self.logger.info(output_cmd_dict[cmd])
if verify_tcpdump_count(self, session, pcap):
return True
return False
#end test_icmp_error_handling_from_mx_with_si
@preposttest_wrapper
def test_icmp_error_payload_matching(self):
"""
Description: Test ICMP error handling with payload diff. from original packet
1. icmp pakcet with payload matching should be accepted and others should be denied
Steps:
1. define the topology for the test
2. create the resources as defined in the topo
3. send the traffic from sender to unreachable port on recvr side(port 10000 used here), recvr will send icmp error to sender for "destination port unreachable"
4. from recvr side send many other icmp error types in loop
5. sender should recv only icmp error mentioned in step 3 and should NOT recv errors mentioned in step4
Pass criteria: step 5 should pass
"""
topology_class_name = None
#
# Get config for test from topology
result = True
msg = []
if not topology_class_name:
topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
self.logger.info("Scenario for the test used is: %s" %
(topology_class_name))
topo = topology_class_name()
try:
# provided by wrapper module if run in parallel test env
topo.build_topo2(
project=self.project.project_name,
username=self.project.username,
password=self.project.password,
compute_node_list=self.connections.orch.get_hosts(),config_option=self.option)
except (AttributeError,NameError):
topo.build_topo2(compute_node_list=self.connections.orch.get_hosts(),config_option=self.option)
#
# Test setup: Configure policy, VN, & VM
# return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
# Returned topo is of following format:
# config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
setup_obj = self.useFixture(
sdnTopoSetupFixture(self.connections, topo))
out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map,config_option=self.option)
self.logger.info("Setup completed with result %s" % (out['result']))
self.assertEqual(out['result'], True, out['msg'])
if out['result']:
topo_obj, config_topo = out['data']
#Test with SG rule, egress-udp only and also send diff ICMP error with diff payload
port = 10000
pkt_cnt = 2
src_vm_name = 'vm1'
dst_vm_name = 'vm2'
src_vm_fix = config_topo['vm'][src_vm_name]
dst_vm_fix = config_topo['vm'][dst_vm_name]
src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
if self.option == 'openstack':
src_vn_fq_name = src_vn_fix.vn_fq_name
else:
src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
#start tcpdump on src VM
filters = '\'(icmp[0]=3 and icmp[1]=3)\''
session1, pcap1 = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
#start traffic
sender1, receiver1 = self.start_traffic_scapy(src_vm_fix, dst_vm_fix, 'udp',
port, port,recvr=False)
icmp_code = 0
for icmp_type in xrange(0,3):
#start tcpdump on src VM
filters = '\'(icmp[0] = %s and icmp[1] = %s)\'' % (icmp_type, icmp_code)
session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
sender, receiver = self.start_traffic_scapy(dst_vm_fix, src_vm_fix, 'icmp',
port, port, payload="payload",
icmp_type=icmp_type, icmp_code=icmp_code,count=pkt_cnt)
sent, recv = self.stop_traffic_scapy(sender, receiver)
assert sent != 0, "sent count is ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#verify packet count and stop tcpdump
assert verify_tcpdump_count(self, session, pcap, exp_count=0), "pkt count in tcpdump is not ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#type 3 , code (0,3)
icmp_type = 3
for icmp_code in xrange(0,3):
#start tcpdump on src VM
filters = '\'(icmp[0] = %s and icmp[1] = %s)\'' % (icmp_type, icmp_code)
session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
sender, receiver = self.start_traffic_scapy(dst_vm_fix, src_vm_fix, 'icmp',
port, port, payload="payload",
icmp_type=icmp_type, icmp_code=icmp_code,count=pkt_cnt)
sent, recv = self.stop_traffic_scapy(sender, receiver)
assert sent != 0, "sent count is ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#verify packet count and stop tcpdump
assert verify_tcpdump_count(self, session, pcap, exp_count=0), "pkt count in tcpdump is not ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#type 3 , code (4,15)
icmp_type = 3
for icmp_code in xrange(4,16):
#start tcpdump on src VM
filters = '\'(icmp[0] = %s and icmp[1] = %s)\'' % (icmp_type, icmp_code)
session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
sender, receiver = self.start_traffic_scapy(dst_vm_fix, src_vm_fix, 'icmp',
port, port, payload="payload",
icmp_type=icmp_type, icmp_code=icmp_code,count=pkt_cnt)
sent, recv = self.stop_traffic_scapy(sender, receiver)
assert sent != 0, "sent count is ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#verify packet count and stop tcpdump
assert verify_tcpdump_count(self, session, pcap, exp_count=0), "pkt count in tcpdump is not ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#type (4,11), code 0
icmp_code = 0
for icmp_type in xrange(4,12):
#start tcpdump on src VM
filters = '\'(icmp[0] = %s and icmp[1] = %s)\'' % (icmp_type, icmp_code)
session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters)
sender, receiver = self.start_traffic_scapy(dst_vm_fix, src_vm_fix, 'icmp',
port, port, payload="payload",
icmp_type=icmp_type, icmp_code=icmp_code,count=pkt_cnt)
sent, recv = self.stop_traffic_scapy(sender, receiver)
assert sent != 0, "sent count is ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#verify packet count and stop tcpdump
assert verify_tcpdump_count(self, session, pcap, exp_count=0), "pkt count in tcpdump is not ZERO for icmp type %s and code %s" % (icmp_type, icmp_code)
#verify packet count and stop tcpdump
assert verify_tcpdump_count(self, session1, pcap1)
#stop traffic
sent, recv = self.stop_traffic_scapy(sender1, receiver1,recvr=False)
return True
#end test_icmp_error_payload_matching
#end class SecurityGroupRegressionTests7
class SecurityGroupRegressionTests8(BaseSGTest, VerifySecGroup, ConfigPolicy):
    @classmethod
    def setUpClass(cls):
        # One-time suite setup; pin this suite to the openstack
        # orchestrator flavor (read by the tests as self.option).
        super(SecurityGroupRegressionTests8, cls).setUpClass()
        cls.option = 'openstack'
    def runTest(self):
        # Intentional no-op; the real tests are the test_* methods below.
        pass
@preposttest_wrapper
def test_flow_to_sg_rule_mapping(self):
"""
Description: test flow to security group rule uuid mapping for
1. default SG
2. user-defined SG
Steps:
1. create resources as defined in topology
2. start traffic for specific protocol which matches with specific security group rule
3. get flow records from agent and verify if sg rule uuid matches with corresponding ingress/egress rule id
Pass criteria:
step 3 should PASS
"""
topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
topo = topology_class_name()
try:
# provided by wrapper module if run in parallel test env
topo.build_topo(
project=self.project.project_name,
username=self.project.username,
password=self.project.password,
compute_node_list=self.inputs.compute_ips,
config_option=self.option)
except (AttributeError, NameError):
topo.build_topo(compute_node_list=self.inputs.compute_ips,
config_option=self.option)
setup_obj = self.useFixture(
sdnTopoSetupFixture(self.connections, topo))
out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map,
config_option=self.option)
self.logger.info("Setup completed with result %s" % (out['result']))
self.assertEqual(out['result'], True, out['msg'])
if out['result']:
topo_obj, config_topo = out['data']
proto = 'udp'
port = 10000
src_vm_name = 'vm1'
dst_vm_name = 'vm2'
src_vm_fix = config_topo['vm'][src_vm_name]
dst_vm_fix = config_topo['vm'][dst_vm_name]
src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
default_secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
'default']))
# test with default SG
traffic_obj = BaseTraffic.factory(proto=proto)
assert traffic_obj
assert traffic_obj.start(src_vm_fix, dst_vm_fix,
proto, port, port)
assert self.verify_flow_to_sg_rule_mapping(
src_vm_fix,
dst_vm_fix,
src_vn_fix,
dst_vn_fix,
default_secgrp_id,
proto,
port)
sent, recv = traffic_obj.stop()
# test with user-defined SG
sg_name = topo_obj.sg_list[0]
secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
sg_name]))
src_vm_fix.remove_security_group(secgrp=default_secgrp_id)
dst_vm_fix.remove_security_group(secgrp=default_secgrp_id)
src_vm_fix.add_security_group(secgrp=secgrp_id)
dst_vm_fix.add_security_group(secgrp=secgrp_id)
traffic_obj = BaseTraffic.factory(proto=proto)
assert traffic_obj
assert traffic_obj.start(src_vm_fix, dst_vm_fix,
proto, port, port)
assert self.verify_flow_to_sg_rule_mapping(
src_vm_fix,
dst_vm_fix,
src_vn_fix,
dst_vn_fix,
secgrp_id,
proto,
port)
sent, recv = traffic_obj.stop()
return True
# end test_flow_to_sg_rule_mapping
@preposttest_wrapper
def test_flow_to_sg_rule_mapping_multiple_rules(self):
"""
Description: test flow to security group rule uuid mapping for
1. SG with multiple rules and diff active flows matching diff. rules
2. Multiple SG attached to VMs and diff active flows matching diff. SG
Steps:
1. create resources as defined in topology
2. start traffic for specific protocol which matches with specific security group rule
3. get flow records from agent and verify if sg rule uuid matches with corresponding ingress/egress rule id
Pass criteria:
step 3 should PASS
"""
topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
topo = topology_class_name()
try:
# provided by wrapper module if run in parallel test env
topo.build_topo2(
project=self.project.project_name,
username=self.project.username,
password=self.project.password,
compute_node_list=self.inputs.compute_ips,
config_option=self.option)
except (AttributeError, NameError):
topo.build_topo2(compute_node_list=self.inputs.compute_ips,
config_option=self.option)
setup_obj = self.useFixture(
sdnTopoSetupFixture(self.connections, topo))
out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map,
config_option=self.option)
self.logger.info("Setup completed with result %s" % (out['result']))
self.assertEqual(out['result'], True, out['msg'])
if out['result']:
topo_obj, config_topo = out['data']
port = 10000
src_vm_name = 'vm1'
dst_vm_name = 'vm2'
src_vm_fix = config_topo['vm'][src_vm_name]
dst_vm_fix = config_topo['vm'][dst_vm_name]
src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
# start traffic
traffic_obj_udp = BaseTraffic.factory(proto='udp')
assert traffic_obj_udp
assert traffic_obj_udp.start(src_vm_fix, dst_vm_fix,
'udp', port, port)
traffic_obj_tcp = BaseTraffic.factory(proto='tcp')
assert traffic_obj_tcp
assert traffic_obj_tcp.start(src_vm_fix, dst_vm_fix,
'tcp', port, port)
sender_icmp, receiver_icmp = self.start_traffic_scapy(
src_vm_fix, dst_vm_fix, 'icmp', port, port, payload="payload")
sg_name = topo_obj.sg_list[0]
secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
sg_name]))
assert self.verify_flow_to_sg_rule_mapping(
src_vm_fix,
dst_vm_fix,
src_vn_fix,
dst_vn_fix,
secgrp_id,
'udp',
port)
sg_name = topo_obj.sg_list[1]
secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
sg_name]))
assert self.verify_flow_to_sg_rule_mapping(
src_vm_fix,
dst_vm_fix,
src_vn_fix,
dst_vn_fix,
secgrp_id,
'tcp',
port)
port = 0
sg_name = topo_obj.sg_list[0]
secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
sg_name]))
assert self.verify_flow_to_sg_rule_mapping(
src_vm_fix,
dst_vm_fix,
src_vn_fix,
dst_vn_fix,
secgrp_id,
'icmp',
port)
# stop traffic
sent, recv = traffic_obj_udp.stop()
sent, recv = traffic_obj_tcp.stop()
sent, recv = self.stop_traffic_scapy(sender_icmp, receiver_icmp)
return True
#end test_flow_to_sg_rule_mapping_multiple_rules
    @preposttest_wrapper
    def test_flow_to_sg_rule_mapping_intra_vn(self):
        """
        Description: test flow to security group rule uuid mapping for
        1. intra VN traffic with diff SG in src and dst VM
        Steps:
        1. create resources as defined in topology
        2. start traffic for specific protocol which matches with specific security group rule
        3. get flow records from agent and verify if sg rule uuid matches with corresponding ingress/egress rule id
        Pass criteria:
        step 3 should PASS
        """
        topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
        topo = topology_class_name()
        try:
            # provided by wrapper module if run in parallel test env
            topo.build_topo2(
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password, config_option=self.option)
        except (AttributeError, NameError):
            topo.build_topo2(config_option=self.option)
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(config_option=self.option)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
        # Replace sg_list[0]'s rules with a known udp ingress+egress pair
        # so the traffic below is admitted by a rule we can look up.
        rule = [{'direction': '>',
                'protocol': 'udp',
                'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'src_addresses': [{'security_group': 'local'}],
                },
                {'direction': '>',
                'protocol': 'udp',
                'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_addresses': [{'security_group': 'local'}],
                }]
        config_topo['sec_grp'][topo_obj.sg_list[0]].replace_rules(rule)
        proto = 'udp'
        port = 10000
        src_vm_name = 'vm1'
        dst_vm_name = 'vm2'
        src_vm_fix = config_topo['vm'][src_vm_name]
        dst_vm_fix = config_topo['vm'][dst_vm_name]
        src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
        dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
        src_sg_name = topo_obj.sg_list[0]
        dst_sg_name = topo_obj.sg_list[1]
        # FQ name retrieval differs per orchestrator option.
        if self.option == 'openstack':
            src_vn_fq_name = src_vn_fix.vn_fq_name
            dst_vn_fq_name = dst_vn_fix.vn_fq_name
        else:
            src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name())
            dst_vn_fq_name = ':'.join(dst_vn_fix._obj.get_fq_name())
        secgrp_id = get_secgrp_id_from_name(
            self.connections,
            ':'.join([self.inputs.domain_name,
                      self.inputs.project_name,
                      src_sg_name]))
        # start traffic
        traffic_obj = BaseTraffic.factory(proto=proto)
        assert traffic_obj
        assert traffic_obj.start(src_vm_fix, dst_vm_fix,
                                 proto, port, port)
        # get the egress rule uuid of the src VM's SG (the rule that
        # admits the outbound udp traffic)
        rule_uuid = None
        rules = list_sg_rules(self.connections, secgrp_id)
        for rule in rules:
            if rule['direction'] == 'egress' and (rule['ethertype'] == 'IPv4' or \
                rule['remote_ip_prefix'] == '0.0.0.0/0') and \
                (rule['protocol'] == 'any' or rule['protocol'] == proto):
                rule_uuid = rule['id']
                break
        assert rule_uuid, "Egress rule id could not be found"
        test_result = True
        # flow_key_idx values index the agent's flow records on each node
        nh_dst = dst_vm_fix.tap_intf[dst_vn_fq_name]['flow_key_idx']
        nh = src_vm_fix.tap_intf[src_vn_fq_name]['flow_key_idx']
        # verify forward flow on src compute node ('17' = IP protocol
        # number for udp)
        if not self.fetch_flow_verify_sg_uuid(
                nh, src_vm_fix, dst_vm_fix, port, port, '17',
                rule_uuid, src_vm_fix.vm_node_ip):
            test_result = False
        # verify reverse flow on src compute node; when both VMs share a
        # node the reverse flow is keyed by the dst VM's index
        if src_vm_fix.vm_node_ip == dst_vm_fix.vm_node_ip:
            nh = nh_dst
        if not self.fetch_flow_verify_sg_uuid(
                nh, dst_vm_fix, src_vm_fix, port, port, '17',
                rule_uuid, src_vm_fix.vm_node_ip):
            test_result = False
        if src_vm_fix.vm_node_ip != dst_vm_fix.vm_node_ip:
            # VMs on different nodes: also check the dst node's flows
            # against the dst VM's SG ingress rule
            secgrp_id = get_secgrp_id_from_name(
                self.connections,
                ':'.join([self.inputs.domain_name,
                          self.inputs.project_name,
                          dst_sg_name]))
            # get the ingress rule uuid
            rule_uuid = None
            rules = list_sg_rules(self.connections, secgrp_id)
            for rule in rules:
                if rule['direction'] == 'ingress' and \
                    (rule['protocol'] == 'any' or rule['protocol'] == proto):
                    rule_uuid = rule['id']
                    break
            assert rule_uuid, "Ingress rule id could not be found"
            # verify forward flow on dst compute node
            if not self.fetch_flow_verify_sg_uuid(
                    nh_dst, src_vm_fix, dst_vm_fix, port, port, '17',
                    rule_uuid, dst_vm_fix.vm_node_ip):
                test_result = False
            # verify reverse flow on dst compute node
            if not self.fetch_flow_verify_sg_uuid(
                    nh_dst, dst_vm_fix, src_vm_fix, port, port, '17',
                    rule_uuid, dst_vm_fix.vm_node_ip):
                test_result = False
        # stop traffic
        sent, recv = traffic_obj.stop()
        assert test_result
        return True
    #end test_flow_to_sg_rule_mapping_intra_vn
    @preposttest_wrapper
    def test_verify_sg_rule_uuid_in_control_api(self):
        """
        1. Verify uuid for each sg rule in api/control introspect and neutron cli"""
        topology_class_name = None
        #
        # Get config for test from topology
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        topo = topology_class_name()
        try:
            # provided by wrapper module if run in parallel test env
            topo.build_topo2(
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password,
                config_option=self.option)
        except (AttributeError, NameError):
            topo.build_topo2(config_option=self.option)
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(config_option=self.option)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
        # Replace sg_list[0]'s rules with a known IPv4 udp ingress+egress
        # pair so both directions exist for the uuid comparison below.
        rule = [{'direction': '>',
                'protocol': 'udp',
                'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'src_addresses': [{'security_group': 'local'}],
                'ethertype': 'IPv4'
                },
                {'direction': '>',
                'protocol': 'udp',
                'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_addresses': [{'security_group': 'local'}],
                'ethertype': 'IPv4'
                }]
        config_topo['sec_grp'][topo_obj.sg_list[0]].replace_rules(rule)
        sg_list = ['default', topo_obj.sg_list[0]]
        proto = 'udp'
        try:
            prj_name = self.project.project_name
        except (AttributeError, NameError):
            prj_name = 'admin'
        for sg_name in sg_list:
            secgrp_id = get_secgrp_id_from_name(
                self.connections,
                ':'.join([self.inputs.domain_name,
                          self.inputs.project_name,
                          sg_name]))
            # get the egress and ingress rule uuid (neutron view)
            egress_ipv4_id = None
            egress_ipv6_id = None
            ingress_ipv4_id = None
            ingress_ipv6_id = None
            rules = list_sg_rules(self.connections, secgrp_id)
            for rule in rules:
                if rule['direction'] == 'egress' and rule['ethertype'] == 'IPv4':
                    egress_ipv4_id = rule['id']
                elif rule['direction'] == 'ingress' and rule['ethertype'] == 'IPv4':
                    ingress_ipv4_id = rule['id']
                elif rule['direction'] == 'ingress' and rule['ethertype'] == 'IPv6':
                    ingress_ipv6_id = rule['id']
                elif rule['direction'] == 'egress' and rule['ethertype'] == 'IPv6':
                    egress_ipv6_id = rule['id']
            assert egress_ipv4_id, "Egress rule id could not be found"
            assert ingress_ipv4_id, "Ingress rule id could not be found"
            # get SG rule uuid from api and match with neutron uuid.
            # In the API representation a 'local' src address marks an
            # egress rule and a 'local' dst address an ingress rule.
            api_secgrp_obj = self.api_s_inspect.get_cs_secgrp(
                project=prj_name,
                secgrp=sg_name,
                refresh=True)
            uuid_egress_ipv4 = None
            uuid_ingress_ipv4 = None
            uuid_egress_ipv6 = None
            uuid_ingress_ipv6 = None
            for rule in api_secgrp_obj['security-group']['security_group_entries']['policy_rule']:
                if rule['src_addresses'][0]['security_group'] == "local" and rule['ethertype'] == 'IPv4':
                    uuid_egress_ipv4 = rule['rule_uuid']
                elif rule['dst_addresses'][0]['security_group'] == "local" and rule['ethertype'] == 'IPv4':
                    uuid_ingress_ipv4 = rule['rule_uuid']
                elif rule['src_addresses'][0]['security_group'] == "local" and rule['ethertype'] == 'IPv6':
                    uuid_egress_ipv6 = rule['rule_uuid']
                elif rule['dst_addresses'][0]['security_group'] == "local" and rule['ethertype'] == 'IPv6':
                    uuid_ingress_ipv6 = rule['rule_uuid']
            assert uuid_egress_ipv4 == egress_ipv4_id, "egress IPv4 rule uuid is not same in API and \
            neutron for SG:%s" % (sg_name)
            assert uuid_ingress_ipv4 == ingress_ipv4_id, "ingress IPv4 rule uuid is not same in API \
            and neutron for SG:%s" % (sg_name)
            # IPv6 rules are optional; compare only when neutron has them
            if ingress_ipv6_id:
                assert ingress_ipv6_id == uuid_ingress_ipv6, "ingress IPv6 rule uuid is not same in API \
            and neutron for SG:%s" % (sg_name)
            if egress_ipv6_id:
                assert egress_ipv6_id == uuid_egress_ipv6, "egress IPv6 rule uuid is not same in API \
            and neutron for SG:%s" % (sg_name)
            self.logger.info("%s security group rule uuid matches in API with neutron" % (sg_name))
            # get SG rule uuid from control node and match with neutron
            # uuid, on every BGP/control node
            for cn in self.inputs.bgp_ips:
                uuid_egress_ipv4 = None
                uuid_ingress_ipv4 = None
                cn_secgrp_obj = self.cn_inspect[cn].get_cn_sec_grp(
                    project=prj_name,
                    secgrp=sg_name)
                for rule in cn_secgrp_obj['obj_info'][0]['data']['security-group-entries']:
                    if rule['src-addresses']['security-group'] == 'local' and rule['ethertype'] == 'IPv4':
                        uuid_egress_ipv4 = rule['rule-uuid']
                    elif rule['dst-addresses']['security-group'] == 'local' and rule['ethertype'] == 'IPv4':
                        uuid_ingress_ipv4 = rule['rule-uuid']
                    elif rule['src-addresses']['security-group'] == 'local' and rule['ethertype'] == 'IPv6':
                        uuid_egress_ipv6 = rule['rule-uuid']
                    elif rule['dst-addresses']['security-group'] == 'local' and rule['ethertype'] == 'IPv6':
                        uuid_ingress_ipv6 = rule['rule-uuid']
                assert uuid_egress_ipv4 == egress_ipv4_id, "egress rule uuid are not same in control \
            and neutron for SG:%s" % (sg_name)
                assert uuid_ingress_ipv4 == ingress_ipv4_id, "ingress rule uuid are not same in control \
            and neutron for SG:%s" % (sg_name)
                if ingress_ipv6_id:
                    assert ingress_ipv6_id == uuid_ingress_ipv6, "ingress IPv6 rule uuid is not same in control \
            and neutron for SG:%s" % (sg_name)
                if egress_ipv6_id:
                    assert egress_ipv6_id == uuid_egress_ipv6, "egress IPv6 rule uuid is not same in control \
            and neutron for SG:%s" % (sg_name)
            self.logger.info("%s security group rule uuid matches in control with neutron" % (sg_name))
        return True
    # end test_verify_sg_rule_uuid_in_control_api
#end class SecurityGroupRegressionTests8
class SecurityGroupRegressionTests9(BaseSGTest, VerifySecGroup, ConfigPolicy):
    @classmethod
    def setUpClass(cls):
        # One-time suite setup; pin this suite to the openstack
        # orchestrator flavor (read by the tests as self.option).
        super(SecurityGroupRegressionTests9, cls).setUpClass()
        cls.option = 'openstack'
    def runTest(self):
        # Intentional no-op; the real tests are the test_* methods below.
        pass
@preposttest_wrapper
def test_add_remove_default_sg_active_flow(self):
""" add/remove default SG from VM when flow is active and traffic from both ends"""
topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
topo_obj, config_topo = self.create_topo_setup(topology_class_name, "build_topo")
port = 10000
src_vm_name = 'vm1'
dst_vm_name = 'vm2'
src_vm_fix = config_topo['vm'][src_vm_name]
dst_vm_fix = config_topo['vm'][dst_vm_name]
src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
sg_name = 'default'
secgrp_id = get_secgrp_id_from_name(
self.connections,
':'.join([self.inputs.domain_name,
self.inputs.project_name,
sg_name]))
filters1 = '\'(udp and src host %s and dst host %s)\'' % (
src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
filters2 = '\'(tcp and src host %s and dst host %s)\'' % (
dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
sender1, receiver1 = self.start_traffic_scapy(
src_vm_fix, dst_vm_fix, 'udp', port, port, payload="payload")
sender2, receiver2 = self.start_traffic_scapy(
dst_vm_fix, src_vm_fix, 'tcp', port, port, payload="payload")
assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
dst_vm_fix, dst_vn_fix,
filters2, filters1)
src_vm_fix.remove_security_group(secgrp=secgrp_id)
assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
dst_vm_fix, dst_vn_fix,
filters2, filters1,
src_exp_count=0, dst_exp_count=0)
src_vm_fix.add_security_group(secgrp=secgrp_id)
assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
dst_vm_fix, dst_vn_fix,
filters2, filters1)
# stop traffic
sent, recv = self.stop_traffic_scapy(sender1, receiver1)
sent, recv = self.stop_traffic_scapy(sender2, receiver2)
return True
# end test_add_remove_default_sg_active_flow
    @preposttest_wrapper
    def test_add_remove_sg_active_flow1(self):
        """ add/remove SG from VM when flow is active
        1.Traffic from both ends
        2.Test for SG with rule with remote as sg for both ingress-egress"""
        topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
        topo_obj, config_topo = self.create_topo_setup(topology_class_name, "build_topo")
        # Allow-all SG swapped in later: traffic must still stop, proving
        # it is the remote-SG membership rule that gates the flows.
        sg_allow_all = self.create_sec_group_allow_all()
        port = 10000
        src_vm_name = 'vm1'
        dst_vm_name = 'vm2'
        src_vm_fix = config_topo['vm'][src_vm_name]
        dst_vm_fix = config_topo['vm'][dst_vm_name]
        src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
        dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
        sg_name = topo_obj.sg_list[0]
        secgrp_id = get_secgrp_id_from_name(
            self.connections,
            ':'.join([self.inputs.domain_name,
                      self.inputs.project_name,
                      sg_name]))
        default_sg_id = get_secgrp_id_from_name(
            self.connections,
            ':'.join([self.inputs.domain_name,
                      self.inputs.project_name,
                      'default']))
        # Attach only the SG under test to both VMs.
        src_vm_fix.remove_security_group(secgrp=default_sg_id)
        dst_vm_fix.remove_security_group(secgrp=default_sg_id)
        src_vm_fix.add_security_group(secgrp=secgrp_id)
        dst_vm_fix.add_security_group(secgrp=secgrp_id)
        # ingress-egress from same sg (remote address = the SG itself)
        rule = [{'direction': '>',
                'protocol': 'udp',
                'dst_addresses': [{'security_group': topo_obj.domain + ':' + topo_obj.project + ':' + sg_name}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'src_addresses': [{'security_group': 'local'}],
                },
                {'direction': '>',
                'protocol': 'udp',
                'src_addresses': [{'security_group': topo_obj.domain + ':' + topo_obj.project + ':' + sg_name}],
                'src_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_ports': [{'start_port': 0, 'end_port': -1}],
                'dst_addresses': [{'security_group': 'local'}],
                }]
        config_topo['sec_grp'][sg_name].replace_rules(rule)
        filters1 = '\'(udp and src host %s and dst host %s)\'' % (
            src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
        filters2 = '\'(udp and src host %s and dst host %s)\'' % (
            dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
        sender1, receiver1 = self.start_traffic_scapy(
            src_vm_fix, dst_vm_fix, 'udp', port, port, payload="payload")
        sender2, receiver2 = self.start_traffic_scapy(
            dst_vm_fix, src_vm_fix, 'udp', port, port, payload="payload")
        # Both VMs are members of the SG, so traffic flows both ways.
        assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                          dst_vm_fix, dst_vn_fix,
                                          filters2, filters1)
        # Move src VM to the allow-all SG: dst still only admits peers of
        # the original SG, so the active flows must stop.
        src_vm_fix.remove_security_group(secgrp=secgrp_id)
        src_vm_fix.add_security_group(secgrp=sg_allow_all)
        assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                          dst_vm_fix, dst_vn_fix,
                                          filters2, filters1,
                                          src_exp_count=0, dst_exp_count=0)
        # stop traffic
        sent, recv = self.stop_traffic_scapy(sender1, receiver1)
        sent, recv = self.stop_traffic_scapy(sender2, receiver2)
        src_vm_fix.remove_security_group(secgrp=sg_allow_all)
        return True
    # end test_add_remove_sg_active_flow1
@preposttest_wrapper
def test_add_remove_sg_active_flow2(self):
    """ add/remove SG from VM when flow is active
    1.Traffic from both ends
    2.Test for SG with egress cidr rule,ingress sg"""
    # Build the standard 2-VM flow-to-SG-rule-mapping topology.
    topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
    topo_obj, config_topo = self.create_topo_setup(topology_class_name, "build_topo")
    # Allow-all SG; the source VM is switched to it mid-flow below.
    sg_allow_all = self.create_sec_group_allow_all()
    port = 10000   # UDP port for the src -> dst stream
    port2 = 11000  # UDP port for the dst -> src stream
    src_vm_name = 'vm1'
    dst_vm_name = 'vm2'
    src_vm_fix = config_topo['vm'][src_vm_name]
    dst_vm_fix = config_topo['vm'][dst_vm_name]
    src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
    dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
    sg_name = topo_obj.sg_list[0]
    # Resolve UUIDs for the SG under test and the project's default SG.
    secgrp_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  sg_name]))
    default_sg_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  'default']))
    # Detach the default SG so only the SG under test governs traffic.
    src_vm_fix.remove_security_group(secgrp=default_sg_id)
    dst_vm_fix.remove_security_group(secgrp=default_sg_id)
    src_vm_fix.add_security_group(secgrp=secgrp_id)
    dst_vm_fix.add_security_group(secgrp=secgrp_id)
    # start the traffic from src VM
    sender1, receiver1 = self.start_traffic_scapy(
        src_vm_fix, dst_vm_fix, 'udp', port, port, payload="payload")
    # start the traffic from dst VM
    sender2, receiver2 = self.start_traffic_scapy(
        dst_vm_fix, src_vm_fix, 'udp', port2, port2, payload="payload")
    # Replace the SG rules while the flows above are already active:
    # egress to any CIDR (0.0.0.0/0), ingress only from this same SG.
    rule = [{'direction': '>',
             'protocol': 'udp',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'udp',
             'src_addresses': [{'security_group': topo_obj.domain + ':' + topo_obj.project + ':' + sg_name}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][sg_name].replace_rules(rule)
    # tcpdump filters, one per traffic direction.
    filters1 = '\'(udp and src host %s and dst host %s)\'' % (
        src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
    filters2 = '\'(udp and src host %s and dst host %s)\'' % (
        dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    # Both directions should flow while both VMs are members of sg_name.
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1)
    # Swap the src VM to the allow-all SG mid-flow; it is no longer a member
    # of sg_name, so the SG-referencing ingress leg should stop
    # (dst_exp_count=0 — presumably "expect no packets at dst"; confirm
    # against verify_traffic_on_vms' signature).
    src_vm_fix.remove_security_group(secgrp=secgrp_id)
    src_vm_fix.add_security_group(secgrp=sg_allow_all)
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1,
                                      dst_exp_count=0)
    # stop traffic
    sent, recv = self.stop_traffic_scapy(sender1, receiver1)
    sent, recv = self.stop_traffic_scapy(sender2, receiver2)
    src_vm_fix.remove_security_group(secgrp=sg_allow_all)
    return True
# end test_add_remove_sg_active_flow2
@preposttest_wrapper
def test_add_remove_sg_active_flow3(self):
    """ add/remove SG from VM when flow is active
    1. Traffic from both ends
    2. Test for SG with ingress cidr and egress sg"""
    # Build the standard 2-VM flow-to-SG-rule-mapping topology.
    topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
    topo_obj, config_topo = self.create_topo_setup(topology_class_name, "build_topo")
    # Allow-all SG; the source VM is switched to it mid-flow below.
    sg_allow_all = self.create_sec_group_allow_all()
    port = 10000   # UDP port for the src -> dst stream
    port2 = 11000  # UDP port for the dst -> src stream
    src_vm_name = 'vm1'
    dst_vm_name = 'vm2'
    src_vm_fix = config_topo['vm'][src_vm_name]
    dst_vm_fix = config_topo['vm'][dst_vm_name]
    src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
    dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
    sg_name = topo_obj.sg_list[0]
    # Resolve UUIDs for the SG under test and the project's default SG.
    secgrp_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  sg_name]))
    default_sg_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  'default']))
    # Detach the default SG so only the SG under test governs traffic.
    src_vm_fix.remove_security_group(secgrp=default_sg_id)
    dst_vm_fix.remove_security_group(secgrp=default_sg_id)
    src_vm_fix.add_security_group(secgrp=secgrp_id)
    dst_vm_fix.add_security_group(secgrp=secgrp_id)
    # start the traffic from src VM
    sender1, receiver1 = self.start_traffic_scapy(
        src_vm_fix, dst_vm_fix, 'udp', port, port, payload="payload")
    # start the traffic from dst VM
    sender2, receiver2 = self.start_traffic_scapy(
        dst_vm_fix, src_vm_fix, 'udp', port2, port2, payload="payload")
    # Replace the SG rules while the flows above are already active:
    # egress only to this same SG, ingress from any CIDR (0.0.0.0/0).
    rule = [{'direction': '>',
             'protocol': 'udp',
             'dst_addresses': [{'security_group': topo_obj.domain + ':' + topo_obj.project + ':' + sg_name}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'udp',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][sg_name].replace_rules(rule)
    # tcpdump filters, one per traffic direction.
    filters1 = '\'(udp and src host %s and dst host %s)\'' % (
        src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
    filters2 = '\'(udp and src host %s and dst host %s)\'' % (
        dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    # Both directions should flow while both VMs are members of sg_name.
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1)
    # Swap the src VM to the allow-all SG mid-flow; the SG-referencing
    # egress leg should stop (src_exp_count=0 — presumably "expect no
    # packets at src"; confirm against verify_traffic_on_vms' signature).
    src_vm_fix.remove_security_group(secgrp=secgrp_id)
    src_vm_fix.add_security_group(secgrp=sg_allow_all)
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1,
                                      src_exp_count=0)
    # stop traffic
    sent, recv = self.stop_traffic_scapy(sender1, receiver1)
    sent, recv = self.stop_traffic_scapy(sender2, receiver2)
    src_vm_fix.remove_security_group(secgrp=sg_allow_all)
    return True
# end test_add_remove_sg_active_flow3
@preposttest_wrapper
def test_add_remove_sg_active_flow4(self):
    """ add/remove SG from VM when flow is active
    1. Traffic from both ends
    2. Test for SG with cidr both ingress-egress"""
    # Build the standard 2-VM flow-to-SG-rule-mapping topology.
    topology_class_name = sdn_sg_test_topo.sdn_topo_flow_to_sg_rule_mapping
    topo_obj, config_topo = self.create_topo_setup(topology_class_name, "build_topo")
    port = 10000  # both UDP streams use the same port in this variant
    src_vm_name = 'vm1'
    dst_vm_name = 'vm2'
    src_vm_fix = config_topo['vm'][src_vm_name]
    dst_vm_fix = config_topo['vm'][dst_vm_name]
    src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
    dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
    sg_name = topo_obj.sg_list[0]
    # Resolve UUIDs for the SG under test and the project's default SG.
    secgrp_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  sg_name]))
    default_sg_id = get_secgrp_id_from_name(
        self.connections,
        ':'.join([self.inputs.domain_name,
                  self.inputs.project_name,
                  'default']))
    # Detach the default SG so only the SG under test governs traffic.
    src_vm_fix.remove_security_group(secgrp=default_sg_id)
    dst_vm_fix.remove_security_group(secgrp=default_sg_id)
    src_vm_fix.add_security_group(secgrp=secgrp_id)
    dst_vm_fix.add_security_group(secgrp=secgrp_id)
    # start the traffic from src VM
    sender1, receiver1 = self.start_traffic_scapy(
        src_vm_fix, dst_vm_fix, 'udp', port, port, payload="payload")
    # start the traffic from dst VM
    sender2, receiver2 = self.start_traffic_scapy(
        dst_vm_fix, src_vm_fix, 'udp', port, port, payload="payload")
    # Replace the SG rules while the flows above are already active:
    # CIDR-based (0.0.0.0/0) allow in both directions.
    rule = [{'direction': '>',
             'protocol': 'udp',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'udp',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
    config_topo['sec_grp'][sg_name].replace_rules(rule)
    # tcpdump filters, one per traffic direction.
    filters1 = '\'(udp and src host %s and dst host %s)\'' % (
        src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
    filters2 = '\'(udp and src host %s and dst host %s)\'' % (
        dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1)
    # NOTE(review): unlike flow1-3, the SAME SG is removed and re-added here
    # (no allow-all swap) and traffic is expected to keep flowing — appears
    # intentional for the "cidr both directions" variant; confirm.
    src_vm_fix.remove_security_group(secgrp=secgrp_id)
    src_vm_fix.add_security_group(secgrp=secgrp_id)
    assert self.verify_traffic_on_vms(src_vm_fix, src_vn_fix,
                                      dst_vm_fix, dst_vn_fix,
                                      filters2, filters1)
    # stop traffic
    sent, recv = self.stop_traffic_scapy(sender1, receiver1)
    sent, recv = self.stop_traffic_scapy(sender2, receiver2)
    return True
# end test_add_remove_sg_active_flow4
#end class SecurityGroupRegressionTests9
class SecurityGroupSynAckTest(BaseSGTest, VerifySecGroup, ConfigPolicy):
    """Verify that the SYN-ACK of an established TCP handshake is allowed
    through security groups even after the vRouter flow entry has expired."""

    @classmethod
    def setUpClass(cls):
        super(SecurityGroupSynAckTest, cls).setUpClass()
        # Orchestrator API flavour used by the base fixtures.
        cls.option = 'openstack'

    def runTest(self):
        pass

    @preposttest_wrapper
    def test_syn_ack_create_flow(self):
        """
        Description:
            verify if SYN ack is allowed and flow is created again after flow is expired
        Steps:
            1. configure secgroupA with egress rule
            2. configure secgroupB with ingress/egress rule
            3. Make sure traffic from VM(secgrpB) to VM(secgrpA) fails as the VM(secgrpA) doesn't allow ingress traffic
            4. Send traffic from VM(secgrpA) to VM(secgrpB), expected to pass through
            5. Send SYN from VM(secgrpA) to VM(secgrpB).
            6. recv SYN at VM(secgrpB) and Wait for flow to expire(180 sec)
            7. Send SYN+ACK from VM(secgrpB) to VM(secgrpA), though the flow is expired and VM(secgrpA) denies ingress traffic, SYN_ACK packet of intial SYN should go through.
        Pass criteria:
            step 7 should PASS
        """
        topology_class_name = sdn_sg_test_topo.sdn_topo_icmp_error_handling
        topo = topology_class_name()
        try:
            # provided by wrapper module if run in parallel test env
            topo.build_topo2(
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password,
                compute_node_list=self.inputs.compute_ips)
        except (AttributeError,NameError):
            topo.build_topo2(compute_node_list=self.inputs.compute_ips)
        # secgroupA (sg_list[0]): egress-only rule — no ingress allowed.
        topo.sg_rules[topo.sg_list[0]] = [
            {'direction': '>',
             'protocol': 'any',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0',
                                           'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             }]
        # secgroupB (sg_list[1]): both egress and ingress allowed from any CIDR.
        topo.sg_rules[topo.sg_list[1]] = [
            {'direction': '>',
             'protocol': 'any',
             'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0',
                                           'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'src_addresses': [{'security_group': 'local'}],
             },
            {'direction': '>',
             'protocol': 'any',
             'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0',
                                           'ip_prefix_len': 0}}],
             'dst_ports': [{'start_port': 0, 'end_port': -1}],
             'src_ports': [{'start_port': 0, 'end_port': -1}],
             'dst_addresses': [{'security_group': 'local'}],
             }]
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        # Both VMs on one compute so a single agent holds both flow entries.
        out = setup_obj.topo_setup(vms_on_single_compute=True)
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_obj, config_topo = out['data']
            src_vm_name = 'vm1'
            src_vm_fix = config_topo['vm'][src_vm_name]
            src_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[src_vm_name]]
            dst_vm_name = 'vm2'
            dst_vm_fix = config_topo['vm'][dst_vm_name]
            dst_vn_fix = config_topo['vn'][topo_obj.vn_of_vm[dst_vm_name]]
            # Stage the scapy-based SYN client onto vm1 via its compute node
            # (the VM is only reachable through the compute's link-local ip).
            pkg = 'syn_client.py'
            self.logger.info("copying syn client to the compute node.")
            path = os.getcwd() + '/tcutils/pkgs/syn_ack_test/' + pkg
            host_compute = {
                'username': self.inputs.host_data[src_vm_fix.vm_node_ip]['username'],
                'password': self.inputs.host_data[src_vm_fix.vm_node_ip]['password'],
                'ip': src_vm_fix.vm_node_ip}
            copy_file_to_server(host_compute, path, '/tmp', pkg)
            self.logger.info("copying syn client from compute node to VM")
            with settings(host_string='%s@%s' % (self.inputs.username,
                                                 src_vm_fix.vm_node_ip),
                          password=self.inputs.password, warn_only=True,
                          abort_on_prompts=False):
                path = '/tmp/' + pkg
                output = fab_put_file_to_vm(
                    host_string='%s@%s' %
                    (src_vm_fix.vm_username,
                     src_vm_fix.local_ip),
                    password=src_vm_fix.vm_password,
                    src=path,
                    dest='/tmp')
            # Stage the SYN server onto vm2 the same way.
            pkg = 'syn_server.py'
            self.logger.info("copying syn server to the compute node.")
            path = os.getcwd() + '/tcutils/pkgs/syn_ack_test/' + pkg
            host_compute = {
                'username': self.inputs.username,
                'password': self.inputs.password,
                'ip': dst_vm_fix.vm_node_ip}
            copy_file_to_server(host_compute, path, '/tmp', pkg)
            self.logger.info("copying syn server from compute node to VM")
            with settings(host_string='%s@%s' % (self.inputs.username,
                                                 dst_vm_fix.vm_node_ip),
                          password=self.inputs.password, warn_only=True,
                          abort_on_prompts=False):
                path = '/tmp/' + pkg
                output = fab_put_file_to_vm(
                    host_string='%s@%s' %
                    (dst_vm_fix.vm_username,
                     dst_vm_fix.local_ip),
                    password=dst_vm_fix.vm_password,
                    src=path,
                    dest='/tmp')
            # Launch server on vm2 and client on vm1 as daemons; each script
            # takes two ip args (presumably peer ip then local ip — confirm
            # against tcutils/pkgs/syn_ack_test sources).
            cmd1 = 'chmod +x /tmp/syn_server.py;/tmp/syn_server.py %s %s \
                    2>/tmp/server.log 1>/tmp/server.log' \
                % (src_vm_fix.vm_ip, dst_vm_fix.vm_ip)
            cmd2 = 'chmod +x /tmp/syn_client.py;/tmp/syn_client.py %s %s \
                    2>/tmp/client.log 1>/tmp/client.log' \
                % (dst_vm_fix.vm_ip, src_vm_fix.vm_ip)
            output_cmd_dict = dst_vm_fix.run_cmd_on_vm(cmds=[cmd1],
                                                       as_sudo=True, as_daemon=True)
            output_cmd_dict = src_vm_fix.run_cmd_on_vm(cmds=[cmd2],
                                                       as_sudo=True, as_daemon=True)
            sleep(1)
            #verify flow created
            inspect_h1 = self.agent_inspect[src_vm_fix.vm_node_ip]
            flow_rec1 = None
            sport = '8100'  # client source port used by syn_client.py
            dport = '8000'  # server listen port used by syn_server.py
            vn_fq_name=src_vm_fix.vn_fq_name
            flow_timeout = 180  # default vRouter TCP flow ageing, seconds
            flow_rec1 = inspect_h1.get_vna_fetchflowrecord(
                nh=src_vm_fix.tap_intf[vn_fq_name]['flow_key_idx'],
                sip=src_vm_fix.vm_ip,
                dip=dst_vm_fix.vm_ip,
                sport=sport,
                dport=dport,
                protocol='6')
            assert flow_rec1
            #wait for flow to expire
            sleep(flow_timeout+2)
            #verify flow created again
            # The delayed SYN+ACK from the server must re-create the flow
            # even though vm1's SG denies fresh ingress traffic.
            flow_rec1 = inspect_h1.get_vna_fetchflowrecord(
                nh=src_vm_fix.tap_intf[vn_fq_name]['flow_key_idx'],
                sip=src_vm_fix.vm_ip,
                dip=dst_vm_fix.vm_ip,
                sport=sport,
                dport=dport,
                protocol='6')
            assert flow_rec1
            # Reverse-direction flow record; uses vn_fq_name from the src VM
            # for dst's tap — NOTE(review): assumes both VMs share the VN /
            # compute (vms_on_single_compute) — confirm.
            flow_rec1 = inspect_h1.get_vna_fetchflowrecord(
                nh=dst_vm_fix.tap_intf[vn_fq_name]['flow_key_idx'],
                sip=dst_vm_fix.vm_ip,
                dip=src_vm_fix.vm_ip,
                sport=dport,
                dport=sport,
                protocol='6')
            assert flow_rec1
        return True
    #end test_syn_ack_create_flow
# end class SecurityGroupSynAckTest
#creating new classes to run all tests with contrail apis
class SecurityGroupBasicRegressionTests1_contrail(test_regression_basic.SecurityGroupBasicRegressionTests1):
    """Re-run the basic SG regression tests using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # super() correctly names this subclass, so the parent's setUpClass runs.
        super(SecurityGroupBasicRegressionTests1_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests2_contrail(SecurityGroupRegressionTests2):
    """Re-run SecurityGroupRegressionTests2 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent. Passing
        # the parent class starts MRO lookup after it, silently skipping
        # SecurityGroupRegressionTests2.setUpClass.
        super(SecurityGroupRegressionTests2_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests3_contrail(SecurityGroupRegressionTests3):
    """Re-run SecurityGroupRegressionTests3 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests3.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests3_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests4_contrail(SecurityGroupRegressionTests4):
    """Re-run SecurityGroupRegressionTests4 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests4.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests4_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests5_contrail(SecurityGroupRegressionTests5):
    """Re-run SecurityGroupRegressionTests5 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests5.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests5_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests6_contrail(SecurityGroupRegressionTests6):
    """Re-run SecurityGroupRegressionTests6 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests6.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests6_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests7_contrail(SecurityGroupRegressionTests7):
    """Re-run SecurityGroupRegressionTests7 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests7.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests7_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests8_contrail(SecurityGroupRegressionTests8):
    """Re-run SecurityGroupRegressionTests8 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests8.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests8_contrail, cls).setUpClass()
        cls.option = 'contrail'
class SecurityGroupRegressionTests9_contrail(SecurityGroupRegressionTests9):
    """Re-run SecurityGroupRegressionTests9 using the contrail API option."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: super() must name THIS subclass, not the parent, otherwise
        # SecurityGroupRegressionTests9.setUpClass is skipped in the MRO.
        super(SecurityGroupRegressionTests9_contrail, cls).setUpClass()
        cls.option = 'contrail'
| 44.661329
| 175
| 0.56728
| 13,541
| 111,564
| 4.378923
| 0.041356
| 0.020153
| 0.018619
| 0.021756
| 0.859297
| 0.839059
| 0.816966
| 0.805363
| 0.779138
| 0.763926
| 0
| 0.020052
| 0.319207
| 111,564
| 2,497
| 176
| 44.679215
| 0.760638
| 0.055179
| 0
| 0.764063
| 0
| 0.000546
| 0.150485
| 0.002681
| 0
| 0
| 0
| 0
| 0.042054
| 0
| null | null | 0.020754
| 0.013108
| null | null | 0.000546
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
93a6de8e61399b4c850d319f8c8b96c4a3389f89
| 37
|
py
|
Python
|
assn3/Q1input.py
|
vardhan2000/1st-sem-python-assignments
|
9f38ab2b15c36b5ae1c6a725f4d4effe026e0bb4
|
[
"MIT"
] | null | null | null |
assn3/Q1input.py
|
vardhan2000/1st-sem-python-assignments
|
9f38ab2b15c36b5ae1c6a725f4d4effe026e0bb4
|
[
"MIT"
] | null | null | null |
assn3/Q1input.py
|
vardhan2000/1st-sem-python-assignments
|
9f38ab2b15c36b5ae1c6a725f4d4effe026e0bb4
|
[
"MIT"
] | null | null | null |
inp = [1, 1, 3, 2, 1, 2, 3, 2, 2, 2]
| 18.5
| 36
| 0.351351
| 11
| 37
| 1.181818
| 0.363636
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.324324
| 37
| 1
| 37
| 37
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
93f5a9eb513267ed2019d25eca26d1ec076fa594
| 9,197
|
py
|
Python
|
test/orchestrate/options_validator/service_test.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 67
|
2015-03-01T02:16:47.000Z
|
2021-05-10T16:17:21.000Z
|
test/orchestrate/options_validator/service_test.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 150
|
2015-10-22T21:59:37.000Z
|
2022-03-10T00:55:19.000Z
|
test/orchestrate/options_validator/service_test.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 19
|
2016-07-10T03:46:33.000Z
|
2022-02-05T12:13:01.000Z
|
import pytest
from mock import Mock
from sigopt.orchestrate.options_validator.service import OptionsValidatorService
class TestOptionsValidatorService(object):
  """Unit tests for OptionsValidatorService's option validation methods."""

  @pytest.fixture()
  def options_validator_service(self):
    """Build the service under test with its dependencies mocked out."""
    services = Mock()
    return OptionsValidatorService(services)

  @pytest.mark.parametrize('resource', [
    {'requests': {'cpu': 1, 'memory': '200Gi'}, 'gpus': 1},
    {'limits': {'cpu': '200m', 'memory': 200}},
    {'requests': None, 'gpus': 1},
    {'requests': {'cpu': 1, 'memory': '200Gi'}, 'gpus': None},
  ])
  def test_validate_resources(self, options_validator_service, resource):
    """Valid resource option combinations pass validation."""
    options_validator_service.validate_resources(**resource)

  @pytest.mark.parametrize('resource', [
    {'requests': {'cpu': 1, 'memory': '200Gi'}, 'gpus': -1},
    {'limits': {'cpu': '200m', 'memory': 200}, 'requests': 55},
  ])
  def test_orchestrate_resources_bad(self, options_validator_service, resource):
    """Negative gpus or a non-dict requests value is rejected."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_resources(**resource)

  @pytest.mark.parametrize('gpus', [-1, [], dict()])
  def test_validate_resources_wrong_type(self, options_validator_service, gpus):
    """gpus must be a non-negative integer."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_resources(gpus=gpus)

  def test_validate_aws(self, options_validator_service):
    """AWS validation accepts credentials (and additional policies for clusters)."""
    options_validator_service.validate_aws_for_orchestrate(
      aws_access_key_id='foobar',
      aws_secret_access_key='barfoo',
    )
    options_validator_service.validate_aws_for_cluster(
      aws_access_key_id='foobar',
      aws_secret_access_key='barfoo',
      additional_policies=['bar']
    )

  def test_validate_aws_simple(self, options_validator_service):
    """AWS validation succeeds with no options at all."""
    options_validator_service.validate_aws_for_orchestrate()
    options_validator_service.validate_aws_for_cluster()

  def test_validate_aws_rejects_ecr(self, options_validator_service):
    """An `ecr` option is no longer accepted by either AWS validator."""
    with pytest.raises(TypeError):
      options_validator_service.validate_aws_for_cluster(
        ecr=dict(
          image='orchestrate/test',
        ),
      )
    with pytest.raises(TypeError):
      options_validator_service.validate_aws_for_orchestrate(
        ecr=dict(
          image='orchestrate/test',
        ),
      )
    with pytest.raises(TypeError):
      options_validator_service.validate_aws_for_orchestrate(
        ecr=dict(),
      )

  def test_validate_aws_additional_policies(self, options_validator_service):
    """additional_policies must be a list (or None), not a bare string."""
    options_validator_service.validate_aws_for_cluster(additional_policies=[])
    options_validator_service.validate_aws_for_cluster(additional_policies=None)
    with pytest.raises(AssertionError):
      options_validator_service.validate_aws_for_cluster(additional_policies='policy')

  def test_validate_sigopt(self, options_validator_service):
    """A non-empty api_token passes validation."""
    options_validator_service.validate_sigopt(
      api_token='foobar',
    )

  def test_validate_sigopt_simple(self, options_validator_service):
    """SigOpt validation succeeds with no options."""
    options_validator_service.validate_sigopt()

  @pytest.mark.parametrize('api_token', ['', 0])
  def test_validate_sigopt_wrong_value(self, options_validator_service, api_token):
    """Empty or non-string api_token values are rejected."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_sigopt(
        api_token=api_token,
      )

  def test_validate_cluster_options(self, options_validator_service):
    """A full cluster spec (cpu + gpu + system pools) passes validation."""
    options_validator_service.validate_cluster_options(
      provider='aws',
      cluster_name='test-cluster',
      cpu=dict(
        instance_type='t2.small',
        min_nodes=1,
        max_nodes=1,
      ),
      gpu=dict(
        instance_type='p3.2xlarge',
        min_nodes=2,
        max_nodes=2,
      ),
      system=dict(
        instance_type='t3.small',
        min_nodes=1,
        max_nodes=2,
      ),
    )

  def test_validate_cluster_options_ok_missing_values(self, options_validator_service):
    """Any single worker pool (cpu or gpu) may be omitted."""
    options_validator_service.validate_cluster_options(
      cluster_name='test-cluster',
      provider='custom',
      cpu=dict(
        instance_type='t2.small',
        min_nodes=1,
        max_nodes=1,
      ),
      gpu=dict(
        instance_type='p3.2xlarge',
        min_nodes=2,
        max_nodes=2,
      ),
      system=dict(
        instance_type='t3.small',
        min_nodes=1,
        max_nodes=2,
      ),
    )
    options_validator_service.validate_cluster_options(
      provider='aws',
      cluster_name='test-cluster',
      gpu=dict(
        instance_type='p3.2xlarge',
        min_nodes=2,
        max_nodes=2,
      ),
      system=dict(
        instance_type='t3.small',
        min_nodes=1,
        max_nodes=2,
      ),
    )
    options_validator_service.validate_cluster_options(
      provider='aws',
      cluster_name='test-cluster',
      cpu=dict(
        instance_type='t2.small',
        min_nodes=1,
        max_nodes=1,
      ),
      system=dict(
        instance_type='t3.small',
        min_nodes=1,
        max_nodes=2,
      ),
    )

  @pytest.mark.parametrize('cluster_name', ['', None, dict()])
  def test_validate_cluster_options_cluster_name(self, options_validator_service, cluster_name):
    """cluster_name must be a non-empty string."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_cluster_options(
        provider='aws',
        cluster_name=cluster_name,
        cpu=dict(
          instance_type='t2.small',
          min_nodes=1,
          max_nodes=1,
        ),
        system=dict(
          instance_type='t3.small',
          min_nodes=1,
          max_nodes=2,
        ),
      )

  def test_validate_cluster_options_extra_options(self, options_validator_service):
    """Unknown worker pool names (e.g. tpu) are rejected."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_cluster_options(
        provider='aws',
        cluster_name='test-cluster',
        tpu=dict(
          instance_type='p3.2xlarge',
          min_nodes=2,
          max_nodes=2,
        ),
      )

  def test_validate_cluster_options_wrong_type(self, options_validator_service):
    """Worker pool specs must be dicts, not lists of dicts."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_cluster_options(
        provider='aws',
        cluster_name='test-cluster',
        gpu=[dict(
          instance_type='p3.2xlarge',
          min_nodes=2,
          max_nodes=2,
        )],
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_cluster_options(
        provider='aws',
        cluster_name='test-cluster',
        cpu=[dict(
          instance_type='t2.small',
          min_nodes=1,
          max_nodes=1,
        )],
        system=dict(
          instance_type='t3.small',
          min_nodes=1,
          max_nodes=2,
        ),
      )

  def test_validate_cluster_options_ignore_values(self, options_validator_service):
    """Validation only checks structure, not the specific pool values."""
    options_validator_service.validate_cluster_options(
      provider='aws',
      cluster_name='test-cluster',
      cpu=dict(
        instance_type='t2.small',
        min_nodes=1,
        max_nodes=1,
      ),
      system=dict(
        instance_type='t3.small',
        min_nodes=1,
        max_nodes=2,
      ),
    )

  def test_validate_worker_stack(self, options_validator_service):
    """A complete worker stack spec passes validation."""
    options_validator_service.validate_worker_stack(
      name='cpu',
      instance_type='t2.small',
      min_nodes=1,
      max_nodes=1,
    )

  def test_validate_worker_stack_ignores_values(self, options_validator_service):
    """Arbitrary (well-typed) values are accepted; only types are checked."""
    options_validator_service.validate_worker_stack(
      name='foobar',
      instance_type='bazzle',
      min_nodes=2,
      max_nodes=19,
    )

  def test_validate_worker_stack_missing_options(self, options_validator_service):
    """Omitting instance_type, min_nodes, or max_nodes is rejected."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        min_nodes=1,
        max_nodes=1,
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        max_nodes=1,
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        min_nodes=1,
      )

  def test_validate_worker_stack_wrong_type(self, options_validator_service):
    """instance_type must be a string; node counts must be integers."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type=2,
        min_nodes=1,
        max_nodes=1,
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        min_nodes='1',
        max_nodes=1,
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        min_nodes=1,
        max_nodes='1',
      )

  def test_validate_worker_stack_negative(self, options_validator_service):
    """Negative node counts are rejected."""
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        min_nodes=-1,
        max_nodes=1,
      )
    with pytest.raises(AssertionError):
      options_validator_service.validate_worker_stack(
        name='cpu',
        instance_type='t2.small',
        min_nodes=1,
        max_nodes=-1,
      )
| 28.921384
| 96
| 0.662934
| 1,030
| 9,197
| 5.545631
| 0.08835
| 0.162465
| 0.233543
| 0.189951
| 0.857143
| 0.821429
| 0.810399
| 0.780637
| 0.759629
| 0.669993
| 0
| 0.016565
| 0.232032
| 9,197
| 317
| 97
| 29.012618
| 0.792156
| 0
| 0
| 0.686833
| 0
| 0
| 0.067957
| 0
| 0
| 0
| 0
| 0
| 0.05694
| 1
| 0.078292
| false
| 0
| 0.010676
| 0
| 0.096085
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f59d2ce73b14540ef0bbbe9d3f9a75a7cdc319ac
| 100
|
py
|
Python
|
GcMessageProtocol/__init__.py
|
GameCompanyGC/Python-GCDiscordMessageProtocol
|
fc4b565ea7b0df8d26e43d997efa17d1eef09c69
|
[
"MIT"
] | null | null | null |
GcMessageProtocol/__init__.py
|
GameCompanyGC/Python-GCDiscordMessageProtocol
|
fc4b565ea7b0df8d26e43d997efa17d1eef09c69
|
[
"MIT"
] | null | null | null |
GcMessageProtocol/__init__.py
|
GameCompanyGC/Python-GCDiscordMessageProtocol
|
fc4b565ea7b0df8d26e43d997efa17d1eef09c69
|
[
"MIT"
] | null | null | null |
from GcMessageProtocol.protocols import Protocol
from GcMessageProtocol.protocols import BanProtocol
| 50
| 51
| 0.91
| 10
| 100
| 9.1
| 0.6
| 0.461538
| 0.659341
| 0.791209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07
| 100
| 2
| 51
| 50
| 0.978495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
192057cdd8c8f82fbbb777176441c043656bb5ab
| 34,043
|
py
|
Python
|
peripy/test/test_integrators.py
|
jd-bartlett96/PeriPy
|
04cae072e22568e29b9ab8f84d821f9576ecb728
|
[
"MIT"
] | 17
|
2020-12-16T15:17:47.000Z
|
2022-03-03T08:04:54.000Z
|
peripy/test/test_integrators.py
|
jd-bartlett96/PeriPy
|
04cae072e22568e29b9ab8f84d821f9576ecb728
|
[
"MIT"
] | 49
|
2020-01-29T16:50:23.000Z
|
2020-09-02T10:27:25.000Z
|
peripy/test/test_integrators.py
|
jd-bartlett96/PeriPy
|
04cae072e22568e29b9ab8f84d821f9576ecb728
|
[
"MIT"
] | 11
|
2021-07-05T14:03:58.000Z
|
2022-03-20T14:22:44.000Z
|
"""Tests for the integrators module."""
from .conftest import context_available
from ..integrators import (
Integrator, Euler, EulerCL, EulerCromerCL, VelocityVerletCL, ContextError)
from ..model import Model, initial_crack_helper
from ..cl import get_context
import pytest
import numpy as np
import pyopencl as cl
@initial_crack_helper
def is_crack(x, y):
    """Determine whether a pair of particles define the crack.

    :arg x: coordinates of the first particle (indexable, x[0]/x[1] used).
    :arg y: coordinates of the second particle.
    :returns: 1 if the bond between the particles crosses the initial
        crack line, 0 otherwise.
    """
    crack_length = 0.3
    output = 0
    # Order the pair left-to-right by x coordinate.
    p1 = x
    p2 = y
    if x[0] > y[0]:
        p2 = x
        p1 = y
    # 1e-6 makes it fall one side of central line of particles
    if p1[0] < 0.5 + 1e-6 and p2[0] > 0.5 + 1e-6:
        # draw a straight line between them
        m = (p2[1] - p1[1]) / (p2[0] - p1[0])
        c = p1[1] - m * p1[0]
        # height a x = 0.5
        height = m * 0.5 + c
        # Bond crosses x=0.5 within the crack's vertical extent,
        # centred at y=0.5 with half-width 0.5*crack_length.
        if (height > 0.5 * (1 - crack_length)
                and height < 0.5 * (1 + crack_length)):
            output = 1
    return output
def is_density(x):
    """Return the density of the nodal volume.

    The material is homogeneous, so the position *x* is ignored and a
    constant unit density is returned for every node.
    """
    uniform_density = 1.0
    return uniform_density
@pytest.fixture(scope="module")
def euler_integrator(data_path, simple_displacement_boundary):
    """Run the example simulation on the Euler integrator.

    :returns: tuple of (Model, Euler) for the example mesh with the
        standard initial crack.
    """
    path = data_path
    mesh_file = path / "example_mesh.vtk"
    euler = Euler(dt=1e-3)
    # Create model
    model = Model(mesh_file, integrator=euler, horizon=0.1,
                  critical_stretch=0.005,
                  # presumably the bond-based PD stiffness 18E/(pi*delta^4)
                  # with E=0.05, delta=0.1 — confirm against peripy docs
                  bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
                  is_displacement_boundary=simple_displacement_boundary,
                  initial_crack=is_crack)
    return model, euler
@pytest.fixture(scope="module")
def euler_cl_integrator(data_path, simple_displacement_boundary):
    """Run the example simulation on the EulerCL integrator.

    :returns: tuple of (Model, EulerCL); OpenCL counterpart of the
        euler_integrator fixture with identical model parameters.
    """
    path = data_path
    mesh_file = path / "example_mesh.vtk"
    euler = EulerCL(dt=1e-3)
    # Create model
    model = Model(mesh_file, integrator=euler, horizon=0.1,
                  critical_stretch=0.005,
                  bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
                  is_displacement_boundary=simple_displacement_boundary,
                  initial_crack=is_crack)
    return model, euler
@pytest.fixture(scope="module")
def euler_cromer_cl_integrator(data_path, simple_displacement_boundary):
    """Run the example simulation on the EulerCromerCL integrator.

    :returns: tuple of (Model, EulerCromerCL); undamped (damping=0.0),
        and supplies is_density since this integrator needs nodal mass.
    """
    path = data_path
    mesh_file = path / "example_mesh.vtk"
    euler = EulerCromerCL(dt=1e-3, damping=0.0)
    # Create model
    model = Model(mesh_file, integrator=euler, horizon=0.1,
                  critical_stretch=0.005,
                  bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
                  is_displacement_boundary=simple_displacement_boundary,
                  initial_crack=is_crack,
                  is_density=is_density)
    return model, euler
@pytest.fixture(scope="module")
def velocity_verlet_cl_integrator(data_path, simple_displacement_boundary):
    """Run the example simulation on the VelocityVerletCL integrator.

    :returns: tuple of (Model, VelocityVerletCL); undamped (damping=0.0),
        and supplies is_density since this integrator needs nodal mass.
    """
    path = data_path
    mesh_file = path / "example_mesh.vtk"
    euler = VelocityVerletCL(dt=1e-3, damping=0.0)
    # Create model
    model = Model(mesh_file, integrator=euler, horizon=0.1,
                  critical_stretch=0.005,
                  bond_stiffness=18.0 * 0.05 / (np.pi * 0.1**4),
                  is_displacement_boundary=simple_displacement_boundary,
                  initial_crack=is_crack,
                  is_density=is_density)
    return model, euler
def test_no_context(data_path, monkeypatch):
    """Test raising error when no suitable device is found."""
    from .. import integrators

    # Mock the get_context function to return None as it would if no suitable
    # device is found.
    def return_none():
        return None
    monkeypatch.setattr(integrators, "get_context", return_none)
    with pytest.raises(ContextError) as exception:
        EulerCL(dt=1)
    # Bug fix: `exception.value` is the exception *object*; membership
    # testing on it raises TypeError. Compare against its string form.
    assert "No suitable context was found." in str(exception.value)
@context_available
def test_custom_context():
    """Test constructing an EulerCL object using the context argument."""
    ctx = get_context()
    integrator = EulerCL(dt=1, context=ctx)
    # The very same context object must be stored on the integrator.
    assert ctx is integrator.context
def test_invalid_custom_context():
    """Test that a non-Context `context` argument raises TypeError."""
    with pytest.raises(TypeError) as exception:
        EulerCL(dt=1, context=5)
    # Bug fix: `"…" in exception.value` performs membership testing on the
    # TypeError object itself, which raises TypeError. Check the message
    # through str(exception.value) instead.
    assert "context must be a pyopencl Context object" in str(exception.value)
class TestIntegrator:
    """ABC class tests."""

    def test_not_implemented_error(self):
        """Ensure the ABC cannot be instantiated."""
        # Instantiating an abc.ABC with abstract methods raises TypeError.
        with pytest.raises(TypeError):
            Integrator(dt=1)
class TestEuler:
    """Euler integrator tests. See test_euler.py for more tests."""
    # BUGFIX(doc): the docstring previously said "EulerCL" — it had been
    # swapped with TestEulerCL's docstring.

    def test_call(self, data_path, euler_integrator):
        """Regression test for the Euler integrator."""
        path = data_path
        model, integrator = euler_integrator
        nlist, n_neigh = model.initial_connectivity
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        # NOTE(review): damage is (nnodes, 3) here but (nnodes,) in the other
        # integrators' test_call methods — confirm the intended shape.
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        integrator.create_buffers(
            nlist, n_neigh, model.bond_stiffness, model.critical_stretch,
            model.plus_cs, u, ud, udd, force, body_force, damage, regimes,
            model.nregimes, model.nbond_types)
        displacement_bc_magnitudes = 0.00001 / 2 * np.linspace(
            1, 10, 10)
        for step in range(10):
            integrator.__call__(
                displacement_bc_magnitude=displacement_bc_magnitudes[step],
                force_bc_magnitude=0.0)
        # Compare against the cached regression fixtures.
        u_expected = np.load(path/"expected_displacements.npy")
        force_expected = np.load(path/"expected_force.npy")
        damage_expected = np.load(path/"expected_damage.npy")
        expected_connectivity = np.load(path/"expected_connectivity_crack.npz")
        nlist_expected = expected_connectivity["nlist"]
        n_neigh_expected = expected_connectivity["n_neigh"]
        (u_actual,
         _,
         _,
         force_actual,
         _,
         damage_actual,
         nlist_actual,
         n_neigh_actual
         ) = integrator.write(
            u, ud, udd, force, body_force, damage, nlist, n_neigh)
        assert np.allclose(u_actual, u_expected)
        assert np.allclose(force_actual, force_expected)
        assert np.allclose(damage_actual, damage_expected)
        assert np.allclose(nlist_actual, nlist_expected)
        assert np.allclose(n_neigh_actual, n_neigh_expected)

    def test_create_buffers_nregimes(self, euler_integrator):
        """Test exception when n_regimes is supplied to Euler."""
        model, integrator = euler_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        nregimes = 2
        nbond_types = 1
        with pytest.raises(ValueError) as exception:
            integrator.create_buffers(
                nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
                u, ud, udd, force, body_force, damage, regimes, nregimes,
                nbond_types)
        # BUGFIX: message membership must be tested on str(exception.value),
        # not on the exception object itself.
        assert "n-linear damage model's are not" in str(exception.value)

    def test_create_buffers_n_bond_types(self, euler_integrator):
        """Test exception when n_bond_types is supplied to Euler."""
        model, integrator = euler_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        nregimes = 1
        nbond_types = 2
        with pytest.raises(ValueError) as exception:
            integrator.create_buffers(
                nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
                u, ud, udd, force, body_force, damage, regimes, nregimes,
                nbond_types)
        # BUGFIX: check the message via str(exception.value).
        assert "n-material composite models are not" in str(exception.value)

    def test_create_buffers(self, euler_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = euler_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes), dtype=np.float64)
        regimes = None
        nregimes = 1
        nbond_types = 1
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        assert np.allclose(integrator.nlist, nlist)
        assert np.allclose(integrator.n_neigh, n_neigh)
        assert np.allclose(integrator.bond_stiffness, bond_stiffness)
        assert np.allclose(integrator.critical_stretch, critical_stretch)
        assert np.allclose(integrator.u, u)
        assert np.allclose(integrator.ud, ud)
        assert np.allclose(integrator.udd, udd)
        assert np.allclose(integrator.force, force)
        assert np.allclose(integrator.body_force, body_force)

    def test_build(self, euler_integrator):
        """Test initiate integrator arrays."""
        model, integrator = euler_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = None
        densities = None
        integrator.build(
            nnodes, degrees_freedom, max_neighbours, coords,
            volume, family, bc_types, bc_values, force_bc_types,
            force_bc_values, stiffness_corrections, bond_types, densities)
        assert np.allclose(integrator.nnodes, nnodes)
        assert np.allclose(integrator.coords, coords)
        assert np.allclose(integrator.family, family)
        assert np.allclose(integrator.volume, volume)
        assert np.allclose(integrator.bc_types, bc_types)
        assert np.allclose(integrator.bc_values, bc_values)
        assert np.allclose(integrator.force_bc_types, force_bc_types)
        assert np.allclose(integrator.force_bc_values, force_bc_values)

    def test_build_exception(self, euler_integrator):
        """Test exception when bond_types are supplied to Euler."""
        model, integrator = euler_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = 1
        densities = None
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value).
        assert "bond_types are not supported by this" in str(exception.value)

    def test_build_exception_stiffness_corrections(self, euler_integrator):
        """Test exception when stiffness_corrections are applied to Euler."""
        model, integrator = euler_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = 1
        bond_types = None
        densities = None
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value).
        assert "stiffness_corrections are not" in str(exception.value)

    def test_build_exception_densities(self, euler_integrator):
        """Test exception when densities are applied to Euler."""
        model, integrator = euler_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = None
        densities = 1
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value).
        assert "densities are not supported" in str(exception.value)

    def test_create_special_buffers(self, euler_integrator):
        """Test for no special buffers for this integrator."""
        model, integrator = euler_integrator
        value = integrator._create_special_buffers()
        assert value is None

    def test_build_special(self, euler_integrator):
        """Test for no special programs for this integrator."""
        model, integrator = euler_integrator
        value = integrator._build_special()
        assert value is None
class TestEulerCL:
    """EulerCL integrator tests. See test_euler.py for more tests."""
    # BUGFIX(doc): the docstring previously said "Euler" — it had been
    # swapped with TestEuler's docstring.

    @context_available
    def test_call(self, data_path, euler_cl_integrator):
        """Regression test for the EulerCL integrator."""
        path = data_path
        model, integrator = euler_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes), dtype=np.float64)
        regimes = None
        integrator.create_buffers(
            nlist, n_neigh, model.bond_stiffness, model.critical_stretch,
            model.plus_cs, u, ud, udd, force, body_force, damage, regimes,
            model.nregimes, model.nbond_types)
        displacement_bc_magnitudes = 0.00001 / 2 * np.linspace(
            1, 10, 10)
        for step in range(10):
            integrator.__call__(
                displacement_bc_magnitude=displacement_bc_magnitudes[step],
                force_bc_magnitude=0.0)
        u_expected = np.load(path/"expected_displacements.npy")
        force_expected = np.load(path/"expected_force.npy")
        damage_expected = np.load(path/"expected_damage.npy")
        expected_connectivity = np.load(
            path/"expected_connectivity_crack_cl.npz")
        nlist_expected = expected_connectivity["nlist"]
        n_neigh_expected = expected_connectivity["n_neigh"]
        (u_actual,
         _,
         _,
         force_actual,
         _,
         damage_actual,
         nlist_actual,
         n_neigh_actual
         ) = integrator.write(
            u, ud, udd, force, body_force, damage, nlist, n_neigh)
        assert np.allclose(u_actual, u_expected)
        assert np.allclose(force_actual, force_expected)
        assert np.allclose(damage_actual, damage_expected)
        assert np.allclose(nlist_actual, nlist_expected)
        assert np.allclose(n_neigh_actual, n_neigh_expected)

    @context_available
    def test_create_buffers_float(self, euler_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = euler_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        nregimes = 1
        nbond_types = 1
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With a single bond type / regime the constants stay scalar.
        assert type(integrator.bond_stiffness_d) is np.float64
        assert type(integrator.critical_stretch_d) is np.float64

    @context_available
    def test_create_buffers_array(self, euler_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = euler_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = np.zeros((2, 2), dtype=np.float64)
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = np.zeros(
            (model.nnodes, model.max_neighbours), dtype=np.float64)
        nregimes = 2
        nbond_types = 2
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With multiple bond types / regimes the constants become OpenCL
        # device buffers.
        assert type(integrator.bond_stiffness_d) is cl._cl.Buffer
        assert type(integrator.critical_stretch_d) is cl._cl.Buffer

    @context_available
    def test_build_exception(self, euler_cl_integrator):
        """Test exception when densities are supplied to EulerCL."""
        model, integrator = euler_cl_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = None
        densities = 1
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value); membership on
        # the exception object raises TypeError.
        assert "densities are not supported" in str(exception.value)

    @context_available
    def test_create_special_buffers(self, euler_cl_integrator):
        """There are no special buffers so this method does nothing."""
        model, integrator = euler_cl_integrator
        value = integrator._create_special_buffers()
        assert value is None
class TestEulerCromerCL:
    """EulerCromerCL integrator tests. See test_euler.py for more tests."""

    @context_available
    def test_call(self, data_path, euler_cromer_cl_integrator):
        """Regression test for the EulerCromerCL integrator."""
        path = data_path
        model, integrator = euler_cromer_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes), dtype=np.float64)
        regimes = None
        integrator.create_buffers(
            nlist, n_neigh, model.bond_stiffness, model.critical_stretch,
            model.plus_cs, u, ud, udd, force, body_force, damage, regimes,
            model.nregimes, model.nbond_types)
        displacement_bc_magnitudes = 0.00001 / 2 * np.linspace(
            1, 10, 10)
        for step in range(10):
            integrator.__call__(
                displacement_bc_magnitude=displacement_bc_magnitudes[step],
                force_bc_magnitude=0.0)
        (u_actual,
         ud_actual,
         _,
         force_actual,
         _,
         damage_actual,
         nlist_actual,
         n_neigh_actual
         ) = integrator.write(
            u, ud, udd, force, body_force, damage, nlist, n_neigh)
        u_expected = np.load(path/"expected_displacements_euler_cromer.npy")
        force_expected = np.load(path/"expected_force_euler_cromer.npy")
        damage_expected = np.load(path/"expected_damage_euler_cromer.npy")
        expected_connectivity = np.load(
            path/"expected_connectivity_euler_cromer_cl.npz")
        nlist_expected = expected_connectivity["nlist"]
        n_neigh_expected = expected_connectivity["n_neigh"]
        assert np.allclose(u_actual, u_expected)
        assert np.allclose(force_actual, force_expected)
        assert np.allclose(damage_actual, damage_expected)
        assert np.allclose(nlist_actual, nlist_expected)
        assert np.allclose(n_neigh_actual, n_neigh_expected)

    @context_available
    def test_create_buffers_float(self, euler_cromer_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = euler_cromer_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        nregimes = 1
        nbond_types = 1
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With a single bond type / regime the constants stay scalar.
        assert type(integrator.bond_stiffness_d) is np.float64
        assert type(integrator.critical_stretch_d) is np.float64

    @context_available
    def test_create_buffers_array(self, euler_cromer_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = euler_cromer_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = np.zeros((2, 2), dtype=np.float64)
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = np.zeros(
            (model.nnodes, model.max_neighbours), dtype=np.float64)
        nregimes = 2
        nbond_types = 2
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With multiple bond types / regimes the constants become OpenCL
        # device buffers.
        assert type(integrator.bond_stiffness_d) is cl._cl.Buffer
        assert type(integrator.critical_stretch_d) is cl._cl.Buffer

    @context_available
    def test_build_exception(self, euler_cromer_cl_integrator):
        """Test exception when densities are not supplied to EulerCromerCL."""
        model, integrator = euler_cromer_cl_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = None
        densities = None
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value); membership on
        # the exception object raises TypeError.
        assert "densities must be supplied" in str(exception.value)

    @context_available
    def test_create_special_buffers(self, euler_cromer_cl_integrator):
        """There are no special buffers so this method does nothing."""
        model, integrator = euler_cromer_cl_integrator
        value = integrator._create_special_buffers()
        assert value is None
class TestVelocityVerletCL:
    """VelocityVerletCL integrator tests. See test_euler.py for more tests."""

    @context_available
    def test_call(self, data_path, velocity_verlet_cl_integrator):
        """Regression test for the VelocityVerletCL integrator."""
        path = data_path
        model, integrator = velocity_verlet_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes), dtype=np.float64)
        regimes = None
        integrator.create_buffers(
            nlist, n_neigh, model.bond_stiffness, model.critical_stretch,
            model.plus_cs, u, ud, udd, force, body_force, damage, regimes,
            model.nregimes, model.nbond_types)
        displacement_bc_magnitudes = 0.00001 / 2 * np.linspace(
            1, 10, 10)
        for step in range(10):
            integrator.__call__(
                displacement_bc_magnitude=displacement_bc_magnitudes[step],
                force_bc_magnitude=0.0)
        (u_actual,
         ud_actual,
         udd_actual,
         force_actual,
         _,
         damage_actual,
         nlist_actual,
         n_neigh_actual
         ) = integrator.write(
            u, ud, udd, force, body_force, damage, nlist, n_neigh)
        u_expected = np.load(path/"expected_displacements_velocity_verlet.npy")
        ud_expected = np.load(path/"expected_velocities_velocity_verlet.npy")
        udd_expected = np.load(
            path/"expected_accelerations_velocity_verlet.npy")
        force_expected = np.load(path/"expected_force_velocity_verlet.npy")
        damage_expected = np.load(path/"expected_damage_velocity_verlet.npy")
        expected_connectivity = np.load(
            path/"expected_connectivity_velocity_verlet_cl.npz")
        nlist_expected = expected_connectivity["nlist"]
        n_neigh_expected = expected_connectivity["n_neigh"]
        assert np.allclose(u_actual, u_expected)
        assert np.allclose(ud_actual, ud_expected)
        assert np.allclose(udd_actual, udd_expected)
        assert np.allclose(force_actual, force_expected)
        assert np.allclose(damage_actual, damage_expected)
        assert np.allclose(nlist_actual, nlist_expected)
        assert np.allclose(n_neigh_actual, n_neigh_expected)

    @context_available
    def test_create_buffers_float(self, velocity_verlet_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = velocity_verlet_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = model.plus_cs
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = None
        nregimes = 1
        nbond_types = 1
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With a single bond type / regime the constants stay scalar.
        assert type(integrator.bond_stiffness_d) is np.float64
        assert type(integrator.critical_stretch_d) is np.float64

    @context_available
    def test_create_buffers_array(self, velocity_verlet_cl_integrator):
        """Test initiation of arrays that are dependent on simulation."""
        model, integrator = velocity_verlet_cl_integrator
        nlist, n_neigh = model.initial_connectivity
        bond_stiffness = model.bond_stiffness
        critical_stretch = model.critical_stretch
        plus_cs = np.zeros((2, 2), dtype=np.float64)
        u = np.zeros((model.nnodes, 3), dtype=np.float64)
        ud = np.zeros((model.nnodes, 3), dtype=np.float64)
        udd = np.zeros((model.nnodes, 3), dtype=np.float64)
        force = np.zeros((model.nnodes, 3), dtype=np.float64)
        body_force = np.zeros((model.nnodes, 3), dtype=np.float64)
        damage = np.zeros((model.nnodes, 3), dtype=np.float64)
        regimes = np.zeros(
            (model.nnodes, model.max_neighbours), dtype=np.float64)
        nregimes = 2
        nbond_types = 2
        integrator.create_buffers(
            nlist, n_neigh, bond_stiffness, critical_stretch, plus_cs,
            u, ud, udd, force, body_force, damage, regimes, nregimes,
            nbond_types)
        # With multiple bond types / regimes the constants become OpenCL
        # device buffers.
        assert type(integrator.bond_stiffness_d) is cl._cl.Buffer
        assert type(integrator.critical_stretch_d) is cl._cl.Buffer

    @context_available
    def test_build_exception(self, velocity_verlet_cl_integrator):
        """Test exception when densities not supplied to VelocityVerletCL."""
        model, integrator = velocity_verlet_cl_integrator
        nnodes = model.nnodes
        degrees_freedom = model.degrees_freedom
        max_neighbours = model.max_neighbours
        coords = model.coords
        family = model.family
        volume = model.volume
        bc_types = model.bc_types
        bc_values = model.bc_values
        force_bc_types = model.force_bc_types
        force_bc_values = model.force_bc_values
        stiffness_corrections = None
        bond_types = None
        densities = None
        with pytest.raises(ValueError) as exception:
            integrator.build(
                nnodes, degrees_freedom, max_neighbours, coords,
                volume, family, bc_types, bc_values, force_bc_types,
                force_bc_values, stiffness_corrections, bond_types, densities)
        # BUGFIX: check the message via str(exception.value); membership on
        # the exception object raises TypeError.
        assert "densities must be supplied" in str(exception.value)

    @context_available
    def test_create_special_buffers(self, velocity_verlet_cl_integrator):
        """There are no special buffers so this method does nothing."""
        model, integrator = velocity_verlet_cl_integrator
        value = integrator._create_special_buffers()
        assert value is None
| 40.867947
| 79
| 0.653468
| 4,181
| 34,043
| 5.086821
| 0.054772
| 0.038085
| 0.055294
| 0.068554
| 0.885415
| 0.867689
| 0.838302
| 0.819259
| 0.805435
| 0.794903
| 0
| 0.018584
| 0.2555
| 34,043
| 832
| 80
| 40.917067
| 0.820556
| 0.072379
| 0
| 0.814649
| 0
| 0
| 0.03268
| 0.016483
| 0
| 0
| 0
| 0
| 0.100149
| 1
| 0.053812
| false
| 0
| 0.011958
| 0.001495
| 0.083707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
19a08639e22b385ded398f9e6eee2efac79fa737
| 4,187
|
py
|
Python
|
evaluation/examples.py
|
clamsproject/app-fastpunct
|
138a79a4d2bdc1e6c2ede036973e417fb9a41a86
|
[
"Apache-2.0"
] | null | null | null |
evaluation/examples.py
|
clamsproject/app-fastpunct
|
138a79a4d2bdc1e6c2ede036973e417fb9a41a86
|
[
"Apache-2.0"
] | 5
|
2021-09-20T15:11:30.000Z
|
2022-01-21T00:39:52.000Z
|
evaluation/examples.py
|
clamsproject/app-fastpunct
|
138a79a4d2bdc1e6c2ede036973e417fb9a41a86
|
[
"Apache-2.0"
] | null | null | null |
"""examples.py
Some examples for testing and eyeballing parts of the code.
"""
# This is an example segment where fastpunct inserts repetition into the string
segment_with_duplicates_in = """judy woodruff after the news summary the twa bombing is once again our major focus tonight first israeli prime minister peres says who he thinks is to blame then we hear from a reporter who just returned from europe and the mideast finding out how the terrorists operate and from an airline safety expert we next move on to a sampling of what some astronauts had to say at today's shuttle disaster hearing finally a documentary report on what's being done to protect airliners from a killer called wind shear.news summary"""
segment_with_duplicates_out = """Judy Woodruff after the news summary: "The TWA bombing is once again our major focus tonight first Israeli Prime Minister Peres says who he thinks is to blame then we hear from a reporter who just returned from Europe and the Mideast finding out how the terrorists operate and from an airline safety expert we next move on to a sampling of what some astronauts had to say at today's shuttle disaster hearing finally a documentary report on what's being done to protect airliners from a killer called Wind Shear.news summary: "After the news summary: The TWA bombing is once again our major focus tonight first Israeli Prime Minister Peters: "First Israeli Prime Minister Peres says who he thinks is to blame then we hear from a reporter who just returned from Europe and the mideast finding out how the terrorists operate and from an airline safety expert we next move on to a sampling of what some astronauts had to say at today's shuttle disaster hearing finally a documentary report on what some astronaut"""
# Some cached results from prior invocations of fastpunct so we can run the code
# quickly with loading all the libraries and running the slow code.
cached_results = {
'been':
'Been.',
"evening i'm jim lehrer on the newshour tonight phil ponce likes of the us military reservist call up what it means and who's affected senator attacked and urban debate new post colorado gun control proposals charles krauss tells the story of brazil's economic crisis and rebound and poet laureate robert pinsky reason ben johnson paul all on children and tragedy and all follows a summary that is this tuesday":
"Evening I'm Jim Lehrer on the newshour tonight, Phil Ponce, likes of the U.S. military reservist, call up what it means and who's affected senator attacked and urban debate new post Colorado gun control proposals, Charles Krauss tells the story of Brazil's economic crisis and rebound and poet Laurent Pinsky reason Ben Johnson Paul All on Children and Tragicity and All follows a summary that is this Tuesday.",
'keating the world is the biggest challenge of the new century which is widely condemned promote satellite technology to help the american farmer be even more connected':
'Keating the world is the biggest challenge of the new century, which is widely condemned promote satellite technology to help the American farmer be even more connected.',
'the world':
'The world.',
"by the corporation for public broadcasting and by the annual financial support from viewers like you president clinton authorized the pentagon today to call up more than thirty thousand reserve and national guard personnel they'll be used in the air campaign over kosovo and the three g i've captured by a bizarre troops were pronounced dead by red cross doctor as bagels bombing of serbia and the flight of ethnic albanian refugees from kosovo continue on beer narrates our update report":
"By the Corporation for Public Broadcasting and by the annual financial support from viewers like you, President Clinton authorized the Pentagon today to call up more than thirty thousand Reserve and National Guard personnel they'll be used in the air campaign over Kosovo and the three G I've captured by a bizarre troops were pronounced dead by Red Cross doctor as Bagels bombing of Serbia and the flight of ethnic Albanian refugees from Kosovo continue on beer narrates, our update report."
}
| 116.305556
| 1,044
| 0.800334
| 702
| 4,187
| 4.763533
| 0.324786
| 0.01256
| 0.020335
| 0.029904
| 0.883971
| 0.883971
| 0.883971
| 0.883971
| 0.883971
| 0.836124
| 0
| 0
| 0.176738
| 4,187
| 35
| 1,045
| 119.628571
| 0.970119
| 0.070695
| 0
| 0
| 0
| 0.571429
| 0.951289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5fd6dffbd1b89e416601b838b06d2693561c2037
| 19,218
|
py
|
Python
|
piwebasync/api/controllers/eventframes.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
piwebasync/api/controllers/eventframes.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | 2
|
2022-03-02T17:42:21.000Z
|
2022-03-29T19:24:01.000Z
|
piwebasync/api/controllers/eventframes.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Tuple, Union
from ...types import APIRequestType, ControllerType, QueryStrType
class EventFrames:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe.html
"""
CONTROLLER = "eventframes"
    def __init__(self, constructor: ControllerType) -> None:
        # Store the request constructor used by every endpoint method to
        # build the outgoing API request.
        self._constructor = constructor
def get(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/get.html
"""
action = None
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_by_path(
self,
path: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getbypath.html
"""
action = None
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
path=path,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def acknowledge(
self,
webid: str
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/acknowledge.html
"""
action = "acknowledge"
return self._constructor._build_request(
method="PATCH",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid
)
def execute_search_by_attribute(
self,
search_id: str,
search_mode: str = None,
start_time: datetime = None,
end_time: datetime = None,
name_filter: QueryStrType = None,
referenced_element_name_filter: QueryStrType = None,
severity: Union[List[str], Tuple[str]] = None,
can_be_acknowledged: str = None,
is_acknowleged: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/executesearchbyattribute.html
"""
action = "searchbyattribute"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
add_path=[search_id],
search_mode=search_mode,
start_time=start_time,
end_time=end_time,
name_filter=name_filter,
referenced_element_name_filter=referenced_element_name_filter,
severity_many=severity,
can_be_acknowledged=can_be_acknowledged,
is_acknowleged=is_acknowleged,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def find_event_frame_attributes(
self,
webid: str,
search_mode: str = None,
start_time: datetime = None,
end_time: datetime = None,
event_frame_name_filter: QueryStrType = None,
event_frame_description_filter: QueryStrType = None,
referenced_element_name_filter: QueryStrType = None,
event_frame_category: str = None,
event_frame_template: str = None,
attribute_name_filter: QueryStrType = None,
attribute_description_filter: QueryStrType = None,
attribute_category: str = None,
attribute_type: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/findeventframeattributes.html
"""
action = "eventframeattributes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
search_mode=search_mode,
start_time=start_time,
end_time=end_time,
event_frame_name_filter=event_frame_name_filter,
event_frame_description_filter=event_frame_description_filter,
referenced_element_name_filter=referenced_element_name_filter,
event_frame_category=event_frame_category,
event_frame_template=event_frame_template,
attribute_name_filter=attribute_name_filter,
attribute_description_filter=attribute_description_filter,
attribute_category=attribute_category,
attribute_type=attribute_type,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_annotation_attachment_media_by_id(
self,
webid: str,
id: str,
disposition: str = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/get.html
"""
action = "annotations"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
add_path=[id, "attachment", "media"],
disposition=disposition,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_annotation_attachment_media_data_by_id(
self,
webid: str,
id: str,
disposition: str = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getannotationattachmentmediadatabyid.html
"""
action = "annotations"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
add_path=[id, "attachment", "media", "mediadata"],
disposition=disposition
)
def get_annotation_attachment_media_metadata_by_id(
self,
webid: str,
id: str,
disposition: str = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getannotationattachmentmediametadatabyid.html
"""
action = "annotations"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
add_path=[id, "attachment", "media", "metadata"],
disposition=disposition,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_annotation_by_id(
self,
webid: str,
id: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getannotationbyid.html
"""
action = "annotations"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
add_path=[id],
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_annotations(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getannotations.html
"""
action = "annotations"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_attributes(
self,
webid: str,
name_filter: QueryStrType = None,
category_name: str = None,
template_name: str = None,
value_type: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
show_excluded: bool = None,
show_hidden: bool = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None,
trait: Union[List[str], Tuple[str]] = None,
trait_category: Union[List[str], Tuple[str]] = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getattributes.html
"""
action = "attributes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
name_filter=name_filter,
category_name=category_name,
template_name=template_name,
value_type=value_type,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
show_excluded=show_excluded,
show_hidden=show_hidden,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations,
trait_many=trait,
trait_category_many=trait_category
)
def get_categories(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getcategories.html
"""
action = "categories"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_event_frames(
self,
webid: str,
search_mode: str = None,
start_time: datetime = None,
end_time: datetime = None,
name_filter: QueryStrType = None,
referenced_element_name_filter: QueryStrType = None,
category_name: str = None,
template_name: str = None,
referenced_element_template_name: str = None,
severity: Union[List[str], Tuple[str]] = None,
can_be_acknowledged: str = None,
is_acknowleged: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/geteventframes.html
"""
action = "eventframes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
search_mode=search_mode,
start_time=start_time,
end_time=end_time,
name_filter=name_filter,
referenced_element_name_filter=referenced_element_name_filter,
category_name=category_name,
template_name=template_name,
referenced_element_template_name=referenced_element_template_name,
severity_many=severity,
can_be_acknowledged=can_be_acknowledged,
is_acknowleged=is_acknowleged,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_event_frames_query(
self,
database_web_id: str = None,
query: QueryStrType = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/geteventframesquery.html
"""
action = "search"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
database_web_id=database_web_id,
query=query,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_multiple(
self,
webid: Union[List[str], Tuple[str]] = None,
path: Union[List[str], Tuple[str]] = None,
include_mode: str = None,
as_parallel: bool = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getmultiple.html
"""
assert webid is not None or path is not None
action = "multiple"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
web_id=webid,
paths=path,
include_mode=include_mode,
as_parallel=as_parallel,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_referenced_elements(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getreferencedelements.html
"""
action = "referencedelements"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_security(
self,
webid: str,
user_identity: Union[List[str], Tuple[str]] = None,
force_refresh: bool = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getsecurity.html
"""
action = "security"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
user_identity_many=user_identity,
force_refresh=force_refresh,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_security_entries(
self,
webid: str,
name_filter: QueryStrType = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getsecurityentries.html
"""
action = "securityentries"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
name_filter=name_filter,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_security_entry_by_name(
self,
webid: str,
name: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/eventframe/actions/getsecurityentrybyname.html
"""
action = "securityentries"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type,
add_path = [name]
)
| 32.795222
| 147
| 0.594235
| 1,974
| 19,218
| 5.513678
| 0.071935
| 0.048236
| 0.042172
| 0.043734
| 0.832874
| 0.812477
| 0.788129
| 0.779125
| 0.779125
| 0.762312
| 0
| 0
| 0.310958
| 19,218
| 586
| 148
| 32.795222
| 0.82193
| 0.120356
| 0
| 0.747845
| 0
| 0
| 0.025041
| 0
| 0
| 0
| 0
| 0
| 0.002155
| 1
| 0.043103
| false
| 0
| 0.006466
| 0
| 0.094828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2783e93113013aff83fa33e38dc1ab4de49fb418
| 1,994
|
py
|
Python
|
tests/test_probes.py
|
proofdock/chaos-kit
|
e7f7f8c37075872b503ad5dd95b8341760915b35
|
[
"Apache-2.0"
] | 1
|
2020-11-29T00:00:13.000Z
|
2020-11-29T00:00:13.000Z
|
tests/test_probes.py
|
proofdock/chaos-kit
|
e7f7f8c37075872b503ad5dd95b8341760915b35
|
[
"Apache-2.0"
] | 17
|
2020-05-21T05:44:53.000Z
|
2020-10-24T07:03:35.000Z
|
tests/test_probes.py
|
proofdock/chaos-kit
|
e7f7f8c37075872b503ad5dd95b8341760915b35
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import ANY, patch
from tests.fixtures import data
from pdchaoskit.alert import probes
# Mocks are injected bottom-up by @patch: the innermost decorator
# (get_loaded_settings) becomes the first positional argument.
@patch('pdchaoskit.alert.probes.client_session', spec=True)
@patch('pdchaoskit.alert.probes.get_alert_rule', spec=True)
@patch('pdchaoskit.alert.probes.get_alerts', spec=True)
@patch('pdchaoskit.alert.probes.get_loaded_settings', spec=True)
def test_probe_was_alert_fired_no_alerts(get_loaded_settings, get_alerts, get_alert_rule, session):
    """Probe returns False when the latest alert is resolved and no 'fired' alerts exist."""
    # arrange
    get_alert_rule.return_value = {"latest_alert": {"status": "resolved"}}
    get_alerts.return_value = []
    get_loaded_settings.return_value = data.provide_settings()
    # act
    result = probes.was_alert_fired('rule_1')
    # assert: probe reports no alert and queried the rule and its 'fired' alerts
    assert result is False
    get_alert_rule.assert_called_once_with('rule_1', ANY)
    get_alerts.assert_called_once_with('rule_1', 'fired', ANY, ANY, ANY)
# Mocks are injected bottom-up by @patch: get_alert_rule is the first argument.
@patch('pdchaoskit.alert.probes.client_session', spec=True)
@patch('pdchaoskit.alert.probes.get_alert_rule', spec=True)
def test_probe_was_alert_fired_latest_fired(get_alert_rule, session):
    """Probe short-circuits to True when the rule's latest alert is already 'fired'."""
    # arrange
    get_alert_rule.return_value = {"latest_alert": {"status": "fired"}}
    # act
    result = probes.was_alert_fired('rule_1')
    # assert: True without needing to list historical alerts
    assert result is True
    get_alert_rule.assert_called_once_with('rule_1', ANY)
# Mocks are injected bottom-up by @patch: the innermost decorator
# (get_loaded_settings) becomes the first positional argument.
@patch('pdchaoskit.alert.probes.client_session', spec=True)
@patch('pdchaoskit.alert.probes.get_alert_rule', spec=True)
@patch('pdchaoskit.alert.probes.get_alerts', spec=True)
@patch('pdchaoskit.alert.probes.get_loaded_settings', spec=True)
def test_probe_was_alert_fired_fired_during_experiment(get_loaded_settings, get_alerts, get_alert_rule, session):
    """Probe returns True when the latest alert is resolved but a 'fired' alert exists in the window."""
    # arrange
    get_alert_rule.return_value = {"latest_alert": {"status": "resolved"}}
    get_alerts.return_value = [{"alert_time": "2020-07-23T19:49:59Z"}]
    get_loaded_settings.return_value = data.provide_settings()
    # act
    result = probes.was_alert_fired('rule_1')
    # assert
    assert result is True
    get_alert_rule.assert_called_once_with('rule_1', ANY)
| 36.254545
| 113
| 0.764794
| 287
| 1,994
| 4.954704
| 0.1777
| 0.067511
| 0.101266
| 0.182841
| 0.871308
| 0.871308
| 0.853727
| 0.853727
| 0.836146
| 0.836146
| 0
| 0.011871
| 0.112839
| 1,994
| 54
| 114
| 36.925926
| 0.791973
| 0.017553
| 0
| 0.666667
| 0
| 0
| 0.273566
| 0.195697
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
27cd44aba6863b40e053af69bed026dfa0a2e2b5
| 54,719
|
py
|
Python
|
pb/quota_pb2.py
|
zheng-zy/ot_root
|
920236b48458aeed72968bc7ec8e01a084b15951
|
[
"Artistic-2.0"
] | 1
|
2016-03-23T07:54:55.000Z
|
2016-03-23T07:54:55.000Z
|
pb/quota_pb2.py
|
zheng-zy/ot_root
|
920236b48458aeed72968bc7ec8e01a084b15951
|
[
"Artistic-2.0"
] | null | null | null |
pb/quota_pb2.py
|
zheng-zy/ot_root
|
920236b48458aeed72968bc7ec8e01a084b15951
|
[
"Artistic-2.0"
] | 1
|
2020-07-23T18:27:53.000Z
|
2020-07-23T18:27:53.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: quota.proto
import sys
# _b(): identity on Python 2 (where str is already bytes), latin-1 encode on
# Python 3, so the serialized descriptor string literals are always raw bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Default symbol database; every generated descriptor registers itself here.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='quota.proto',
package='ly.quota',
serialized_pb=_b('\n\x0bquota.proto\x12\x08ly.quota\",\n\nMarketInfo\x12\x0e\n\x06\x65xchid\x18\x01 \x02(\x05\x12\x0e\n\x06market\x18\x02 \x02(\t\"3\n\nMarketList\x12%\n\x07mk_list\x18\x01 \x03(\x0b\x32\x14.ly.quota.MarketInfo\"\x1e\n\x0c\x43odeTableReq\x12\x0e\n\x06\x65xchid\x18\x01 \x02(\x05\"!\n\x0f\x43odeTableChange\x12\x0e\n\x06\x65xchid\x18\x01 \x02(\x05\"R\n\x0cSecurityCode\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04type\x18\x02 \x02(\x05\x12\x15\n\rsecurity_code\x18\x03 \x02(\t\x12\x0e\n\x06symbol\x18\x04 \x02(\t\"\x82\x01\n\x10SecurityCodeResp\x12\x0e\n\x06source\x18\x01 \x02(\x05\x12\x0c\n\x04\x64\x61te\x18\x02 \x02(\x05\x12\r\n\x05\x63ount\x18\x03 \x02(\x05\x12\r\n\x05\x66lags\x18\x04 \x02(\x05\x12\x32\n\x12security_code_list\x18\x05 \x03(\x0b\x32\x16.ly.quota.SecurityCode\":\n\x15MarketDataReqByMdType\x12\x10\n\x08sub_type\x18\x01 \x02(\r\x12\x0f\n\x07mk_type\x18\x02 \x02(\r\"7\n\x14MarketDataReqByIdnum\x12\x10\n\x08sub_type\x18\x01 \x02(\r\x12\r\n\x05idnum\x18\x02 \x03(\x05\"\x9b\x04\n\nMarketData\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04time\x18\x02 \x02(\x05\x12\x0e\n\x06status\x18\x03 \x02(\x05\x12\x11\n\tpre_close\x18\x04 \x02(\r\x12\x0c\n\x04open\x18\x05 \x02(\r\x12\x0c\n\x04high\x18\x06 \x02(\r\x12\x0b\n\x03low\x18\x07 \x02(\r\x12\r\n\x05match\x18\x08 \x02(\r\x12\x15\n\task_price\x18\t \x03(\rB\x02\x10\x01\x12\x13\n\x07\x61sk_vol\x18\n \x03(\rB\x02\x10\x01\x12\x15\n\tbid_price\x18\x0b \x03(\rB\x02\x10\x01\x12\x13\n\x07\x62id_vol\x18\x0c \x03(\rB\x02\x10\x01\x12\x12\n\nnum_trades\x18\r \x02(\r\x12\x0e\n\x06volume\x18\x0e \x02(\x03\x12\x10\n\x08turnover\x18\x0f \x02(\x03\x12\x15\n\rtotal_bid_vol\x18\x10 \x02(\x03\x12\x15\n\rtotal_ask_vol\x18\x11 \x02(\x03\x12\x1e\n\x16weighted_avg_bid_price\x18\x12 \x02(\r\x12\x1e\n\x16weighted_avg_ask_price\x18\x13 \x02(\r\x12\x0c\n\x04iopv\x18\x14 \x02(\x05\x12\x19\n\x11yield_to_maturity\x18\x15 \x02(\x05\x12\x14\n\x0chigh_limited\x18\x16 \x02(\r\x12\x13\n\x0blow_limited\x18\x17 
\x02(\r\x12\x0c\n\x04stat\x18\x18 \x02(\x05\x12\x10\n\x08pub_time\x18\x19 \x02(\x05\x12\x12\n\nnow_volume\x18\x1a \x02(\x03\x12\x11\n\tnow_value\x18\x1b \x02(\x03\"\xca\x01\n\tIndexData\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04time\x18\x02 \x02(\x05\x12\x12\n\nopen_index\x18\x03 \x02(\x05\x12\x12\n\nhigh_index\x18\x04 \x02(\x05\x12\x11\n\tlow_index\x18\x05 \x02(\x05\x12\x12\n\nlast_index\x18\x06 \x02(\x05\x12\x14\n\x0ctotal_volume\x18\x07 \x02(\x03\x12\x10\n\x08turnover\x18\x08 \x02(\x03\x12\x17\n\x0fpre_close_index\x18\t \x02(\x05\x12\x10\n\x08pub_time\x18\n \x02(\x05\"\xf0\x03\n\x11MarketDataFutures\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04time\x18\x02 \x02(\x05\x12\x0e\n\x06status\x18\x03 \x02(\x05\x12\x19\n\x11pre_open_interest\x18\x04 \x02(\x03\x12\x11\n\tpre_close\x18\x05 \x02(\r\x12\x18\n\x10pre_settle_price\x18\x06 \x02(\r\x12\x0c\n\x04open\x18\x07 \x02(\r\x12\x0c\n\x04high\x18\x08 \x02(\r\x12\x0b\n\x03low\x18\t \x02(\r\x12\x0c\n\x04last\x18\n \x02(\r\x12\x0e\n\x06volume\x18\x0b \x02(\x03\x12\x10\n\x08turnover\x18\x0c \x02(\x03\x12\x15\n\ropen_interest\x18\r \x02(\x03\x12\r\n\x05\x63lose\x18\x0e \x02(\r\x12\x14\n\x0csettle_price\x18\x0f \x02(\r\x12\x14\n\x0chigh_limited\x18\x10 \x02(\r\x12\x13\n\x0blow_limited\x18\x11 \x02(\r\x12\x11\n\tpre_delta\x18\x12 \x02(\x05\x12\x12\n\ncurr_delta\x18\x13 \x02(\x05\x12\x15\n\task_price\x18\x14 \x03(\rB\x02\x10\x01\x12\x13\n\x07\x61sk_vol\x18\x15 \x03(\rB\x02\x10\x01\x12\x15\n\tbid_price\x18\x16 \x03(\rB\x02\x10\x01\x12\x13\n\x07\x62id_vol\x18\x17 \x03(\rB\x02\x10\x01\x12\x15\n\raverage_price\x18\x18 \x01(\r\x12\x10\n\x08pub_time\x18\x19 \x02(\x05\"|\n\x0bTransaction\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04time\x18\x02 \x02(\x05\x12\r\n\x05index\x18\x03 \x02(\x05\x12\r\n\x05price\x18\x04 \x02(\x05\x12\x0e\n\x06volume\x18\x05 \x02(\x05\x12\x10\n\x08turnover\x18\x06 \x02(\x05\x12\x10\n\x08pub_time\x18\x07 \x02(\x05\"\xc6\x01\n\x11TransactionExItem\x12\x0c\n\x04time\x18\x01 
\x01(\x05\x12\x10\n\x08trade_no\x18\x02 \x01(\x05\x12\r\n\x05price\x18\x03 \x01(\x05\x12\x0e\n\x06volume\x18\x04 \x01(\x05\x12\x10\n\x08turnover\x18\x05 \x01(\x05\x12\x0f\n\x07\x62s_flag\x18\x06 \x01(\x05\x12\x12\n\norder_kind\x18\x07 \x01(\x05\x12\x15\n\rfunction_code\x18\x08 \x01(\x05\x12\x11\n\task_order\x18\t \x01(\x05\x12\x11\n\tbid_order\x18\n \x01(\x05\"k\n\rTransactionEx\x12\r\n\x05idnum\x18\x01 \x01(\x05\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\x12\x10\n\x08pub_time\x18\x03 \x01(\x05\x12*\n\x05items\x18\x04 \x03(\x0b\x32\x1b.ly.quota.TransactionExItem\"y\n\nOrderQueue\x12\r\n\x05idnum\x18\x01 \x02(\x05\x12\x0c\n\x04time\x18\x02 \x02(\x05\x12\x0c\n\x04side\x18\x03 \x02(\x05\x12\r\n\x05price\x18\x04 \x02(\x05\x12\x0e\n\x06orders\x18\x05 \x02(\x05\x12\x0f\n\x07volumes\x18\x06 \x03(\x05\x12\x10\n\x08pub_time\x18\x07 \x02(\x05')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# The Descriptor tables below are emitted by protoc from quota.proto and must
# not be edited by hand. Field encoding: label 2 = required, 3 = repeated;
# type 5 = int32, 9 = string, 11 = message, 13 = uint32 (per
# google.protobuf.descriptor_pb2.FieldDescriptorProto).

# ly.quota.MarketInfo: required int32 exchid = 1; required string market = 2;
_MARKETINFO = _descriptor.Descriptor(
  name='MarketInfo',
  full_name='ly.quota.MarketInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='exchid', full_name='ly.quota.MarketInfo.exchid', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='market', full_name='ly.quota.MarketInfo.market', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=25,
  serialized_end=69,
)

# ly.quota.MarketList: repeated MarketInfo mk_list = 1;
_MARKETLIST = _descriptor.Descriptor(
  name='MarketList',
  full_name='ly.quota.MarketList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='mk_list', full_name='ly.quota.MarketList.mk_list', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=71,
  serialized_end=122,
)

# ly.quota.CodeTableReq: required int32 exchid = 1;
_CODETABLEREQ = _descriptor.Descriptor(
  name='CodeTableReq',
  full_name='ly.quota.CodeTableReq',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='exchid', full_name='ly.quota.CodeTableReq.exchid', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=124,
  serialized_end=154,
)

# ly.quota.CodeTableChange: required int32 exchid = 1;
_CODETABLECHANGE = _descriptor.Descriptor(
  name='CodeTableChange',
  full_name='ly.quota.CodeTableChange',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='exchid', full_name='ly.quota.CodeTableChange.exchid', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=156,
  serialized_end=189,
)

# ly.quota.SecurityCode: required int32 idnum = 1; required int32 type = 2;
# required string security_code = 3; required string symbol = 4;
_SECURITYCODE = _descriptor.Descriptor(
  name='SecurityCode',
  full_name='ly.quota.SecurityCode',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='idnum', full_name='ly.quota.SecurityCode.idnum', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='ly.quota.SecurityCode.type', index=1,
      number=2, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='security_code', full_name='ly.quota.SecurityCode.security_code', index=2,
      number=3, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='symbol', full_name='ly.quota.SecurityCode.symbol', index=3,
      number=4, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=191,
  serialized_end=273,
)

# ly.quota.SecurityCodeResp: header ints plus repeated SecurityCode list.
_SECURITYCODERESP = _descriptor.Descriptor(
  name='SecurityCodeResp',
  full_name='ly.quota.SecurityCodeResp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source', full_name='ly.quota.SecurityCodeResp.source', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='date', full_name='ly.quota.SecurityCodeResp.date', index=1,
      number=2, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='count', full_name='ly.quota.SecurityCodeResp.count', index=2,
      number=3, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='flags', full_name='ly.quota.SecurityCodeResp.flags', index=3,
      number=4, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='security_code_list', full_name='ly.quota.SecurityCodeResp.security_code_list', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=276,
  serialized_end=406,
)

# ly.quota.MarketDataReqByMdType: required uint32 sub_type = 1; required uint32 mk_type = 2;
_MARKETDATAREQBYMDTYPE = _descriptor.Descriptor(
  name='MarketDataReqByMdType',
  full_name='ly.quota.MarketDataReqByMdType',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='sub_type', full_name='ly.quota.MarketDataReqByMdType.sub_type', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mk_type', full_name='ly.quota.MarketDataReqByMdType.mk_type', index=1,
      number=2, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=408,
  serialized_end=466,
)

# ly.quota.MarketDataReqByIdnum: required uint32 sub_type = 1; repeated int32 idnum = 2;
_MARKETDATAREQBYIDNUM = _descriptor.Descriptor(
  name='MarketDataReqByIdnum',
  full_name='ly.quota.MarketDataReqByIdnum',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='sub_type', full_name='ly.quota.MarketDataReqByIdnum.sub_type', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='idnum', full_name='ly.quota.MarketDataReqByIdnum.idnum', index=1,
      number=2, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=468,
  serialized_end=523,
)
# Generated protobuf Descriptor for the ly.quota.MarketData message (level-1
# equity quote snapshot: OHLC, match price, ask/bid depth arrays, totals).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead. The repeated ask/bid fields carry
# serialized FieldOptions bytes '\020\001', presumably [packed=true] — the
# standard protoc output for packed repeated scalars (confirm against .proto).
_MARKETDATA = _descriptor.Descriptor(
name='MarketData',
full_name='ly.quota.MarketData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.MarketData.idnum', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.MarketData.time', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='ly.quota.MarketData.status', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_close', full_name='ly.quota.MarketData.pre_close', index=3,
number=4, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='open', full_name='ly.quota.MarketData.open', index=4,
number=5, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high', full_name='ly.quota.MarketData.high', index=5,
number=6, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low', full_name='ly.quota.MarketData.low', index=6,
number=7, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='match', full_name='ly.quota.MarketData.match', index=7,
number=8, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ask_price', full_name='ly.quota.MarketData.ask_price', index=8,
number=9, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='ask_vol', full_name='ly.quota.MarketData.ask_vol', index=9,
number=10, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bid_price', full_name='ly.quota.MarketData.bid_price', index=10,
number=11, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bid_vol', full_name='ly.quota.MarketData.bid_vol', index=11,
number=12, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='num_trades', full_name='ly.quota.MarketData.num_trades', index=12,
number=13, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volume', full_name='ly.quota.MarketData.volume', index=13,
number=14, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='turnover', full_name='ly.quota.MarketData.turnover', index=14,
number=15, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='total_bid_vol', full_name='ly.quota.MarketData.total_bid_vol', index=15,
number=16, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='total_ask_vol', full_name='ly.quota.MarketData.total_ask_vol', index=16,
number=17, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_avg_bid_price', full_name='ly.quota.MarketData.weighted_avg_bid_price', index=17,
number=18, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_avg_ask_price', full_name='ly.quota.MarketData.weighted_avg_ask_price', index=18,
number=19, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iopv', full_name='ly.quota.MarketData.iopv', index=19,
number=20, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='yield_to_maturity', full_name='ly.quota.MarketData.yield_to_maturity', index=20,
number=21, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high_limited', full_name='ly.quota.MarketData.high_limited', index=21,
number=22, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low_limited', full_name='ly.quota.MarketData.low_limited', index=22,
number=23, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stat', full_name='ly.quota.MarketData.stat', index=23,
number=24, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.MarketData.pub_time', index=24,
number=25, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='now_volume', full_name='ly.quota.MarketData.now_volume', index=25,
number=26, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='now_value', full_name='ly.quota.MarketData.now_value', index=26,
number=27, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets of this message's definition within the serialized
# FileDescriptorProto embedded in DESCRIPTOR.
serialized_start=526,
serialized_end=1065,
)
# Generated protobuf Descriptor for the ly.quota.IndexData message (index
# quote: open/high/low/last index levels plus aggregate volume/turnover).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead.
_INDEXDATA = _descriptor.Descriptor(
name='IndexData',
full_name='ly.quota.IndexData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.IndexData.idnum', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.IndexData.time', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='open_index', full_name='ly.quota.IndexData.open_index', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high_index', full_name='ly.quota.IndexData.high_index', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low_index', full_name='ly.quota.IndexData.low_index', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_index', full_name='ly.quota.IndexData.last_index', index=5,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='total_volume', full_name='ly.quota.IndexData.total_volume', index=6,
number=7, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='turnover', full_name='ly.quota.IndexData.turnover', index=7,
number=8, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_close_index', full_name='ly.quota.IndexData.pre_close_index', index=8,
number=9, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.IndexData.pub_time', index=9,
number=10, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=1068,
serialized_end=1270,
)
# Generated protobuf Descriptor for the ly.quota.MarketDataFutures message
# (futures quote: OHLC/last, open interest, settle prices, deltas, depth).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead. Repeated ask/bid fields carry
# serialized FieldOptions '\020\001', presumably [packed=true] (confirm
# against the .proto).
_MARKETDATAFUTURES = _descriptor.Descriptor(
name='MarketDataFutures',
full_name='ly.quota.MarketDataFutures',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.MarketDataFutures.idnum', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.MarketDataFutures.time', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='ly.quota.MarketDataFutures.status', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_open_interest', full_name='ly.quota.MarketDataFutures.pre_open_interest', index=3,
number=4, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_close', full_name='ly.quota.MarketDataFutures.pre_close', index=4,
number=5, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_settle_price', full_name='ly.quota.MarketDataFutures.pre_settle_price', index=5,
number=6, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='open', full_name='ly.quota.MarketDataFutures.open', index=6,
number=7, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high', full_name='ly.quota.MarketDataFutures.high', index=7,
number=8, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low', full_name='ly.quota.MarketDataFutures.low', index=8,
number=9, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last', full_name='ly.quota.MarketDataFutures.last', index=9,
number=10, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volume', full_name='ly.quota.MarketDataFutures.volume', index=10,
number=11, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='turnover', full_name='ly.quota.MarketDataFutures.turnover', index=11,
number=12, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='open_interest', full_name='ly.quota.MarketDataFutures.open_interest', index=12,
number=13, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='close', full_name='ly.quota.MarketDataFutures.close', index=13,
number=14, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='settle_price', full_name='ly.quota.MarketDataFutures.settle_price', index=14,
number=15, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high_limited', full_name='ly.quota.MarketDataFutures.high_limited', index=15,
number=16, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='low_limited', full_name='ly.quota.MarketDataFutures.low_limited', index=16,
number=17, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pre_delta', full_name='ly.quota.MarketDataFutures.pre_delta', index=17,
number=18, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curr_delta', full_name='ly.quota.MarketDataFutures.curr_delta', index=18,
number=19, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ask_price', full_name='ly.quota.MarketDataFutures.ask_price', index=19,
number=20, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='ask_vol', full_name='ly.quota.MarketDataFutures.ask_vol', index=20,
number=21, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bid_price', full_name='ly.quota.MarketDataFutures.bid_price', index=21,
number=22, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bid_vol', full_name='ly.quota.MarketDataFutures.bid_vol', index=22,
number=23, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='average_price', full_name='ly.quota.MarketDataFutures.average_price', index=23,
number=24, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.MarketDataFutures.pub_time', index=24,
number=25, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=1273,
serialized_end=1769,
)
# Generated protobuf Descriptor for the ly.quota.Transaction message
# (single trade tick: index, price, volume, turnover, timestamps).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead.
_TRANSACTION = _descriptor.Descriptor(
name='Transaction',
full_name='ly.quota.Transaction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.Transaction.idnum', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.Transaction.time', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='index', full_name='ly.quota.Transaction.index', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='price', full_name='ly.quota.Transaction.price', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volume', full_name='ly.quota.Transaction.volume', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='turnover', full_name='ly.quota.Transaction.turnover', index=5,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.Transaction.pub_time', index=6,
number=7, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=1771,
serialized_end=1895,
)
# Generated protobuf Descriptor for the ly.quota.TransactionExItem message
# (one extended trade record inside a TransactionEx batch; all fields
# optional, label=1).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead.
_TRANSACTIONEXITEM = _descriptor.Descriptor(
name='TransactionExItem',
full_name='ly.quota.TransactionExItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.TransactionExItem.time', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trade_no', full_name='ly.quota.TransactionExItem.trade_no', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='price', full_name='ly.quota.TransactionExItem.price', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volume', full_name='ly.quota.TransactionExItem.volume', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='turnover', full_name='ly.quota.TransactionExItem.turnover', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bs_flag', full_name='ly.quota.TransactionExItem.bs_flag', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='order_kind', full_name='ly.quota.TransactionExItem.order_kind', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='function_code', full_name='ly.quota.TransactionExItem.function_code', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ask_order', full_name='ly.quota.TransactionExItem.ask_order', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bid_order', full_name='ly.quota.TransactionExItem.bid_order', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=1898,
serialized_end=2096,
)
# Generated protobuf Descriptor for the ly.quota.TransactionEx message
# (a batch of TransactionExItem records; the 'items' field's message_type
# is resolved to _TRANSACTIONEXITEM after all descriptors are defined).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead.
_TRANSACTIONEX = _descriptor.Descriptor(
name='TransactionEx',
full_name='ly.quota.TransactionEx',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.TransactionEx.idnum', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='count', full_name='ly.quota.TransactionEx.count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.TransactionEx.pub_time', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='items', full_name='ly.quota.TransactionEx.items', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=2098,
serialized_end=2205,
)
# Generated protobuf Descriptor for the ly.quota.OrderQueue message
# (order-queue snapshot at one price level: side, price, order count and
# a repeated 'volumes' list).
# NOTE(review): machine-generated protoc output — do not edit by hand;
# regenerate from the .proto source instead.
_ORDERQUEUE = _descriptor.Descriptor(
name='OrderQueue',
full_name='ly.quota.OrderQueue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idnum', full_name='ly.quota.OrderQueue.idnum', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='ly.quota.OrderQueue.time', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='side', full_name='ly.quota.OrderQueue.side', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='price', full_name='ly.quota.OrderQueue.price', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='orders', full_name='ly.quota.OrderQueue.orders', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='volumes', full_name='ly.quota.OrderQueue.volumes', index=5,
number=6, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pub_time', full_name='ly.quota.OrderQueue.pub_time', index=6,
number=7, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
# Byte offsets within the serialized FileDescriptorProto.
serialized_start=2207,
serialized_end=2328,
)
# ---------------------------------------------------------------------------
# Post-definition wiring (generated by protoc — do not edit by hand).
# Order matters here: message-typed fields can only be resolved after every
# Descriptor above has been defined.
# ---------------------------------------------------------------------------
# Resolve message-typed fields to their element descriptors.
_MARKETLIST.fields_by_name['mk_list'].message_type = _MARKETINFO
_SECURITYCODERESP.fields_by_name['security_code_list'].message_type = _SECURITYCODE
_TRANSACTIONEX.fields_by_name['items'].message_type = _TRANSACTIONEXITEM
# Register every top-level message type on the file descriptor so lookups by
# name (e.g. via the symbol database) can find them.
DESCRIPTOR.message_types_by_name['MarketInfo'] = _MARKETINFO
DESCRIPTOR.message_types_by_name['MarketList'] = _MARKETLIST
DESCRIPTOR.message_types_by_name['CodeTableReq'] = _CODETABLEREQ
DESCRIPTOR.message_types_by_name['CodeTableChange'] = _CODETABLECHANGE
DESCRIPTOR.message_types_by_name['SecurityCode'] = _SECURITYCODE
DESCRIPTOR.message_types_by_name['SecurityCodeResp'] = _SECURITYCODERESP
DESCRIPTOR.message_types_by_name['MarketDataReqByMdType'] = _MARKETDATAREQBYMDTYPE
DESCRIPTOR.message_types_by_name['MarketDataReqByIdnum'] = _MARKETDATAREQBYIDNUM
DESCRIPTOR.message_types_by_name['MarketData'] = _MARKETDATA
DESCRIPTOR.message_types_by_name['IndexData'] = _INDEXDATA
DESCRIPTOR.message_types_by_name['MarketDataFutures'] = _MARKETDATAFUTURES
DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION
DESCRIPTOR.message_types_by_name['TransactionExItem'] = _TRANSACTIONEXITEM
DESCRIPTOR.message_types_by_name['TransactionEx'] = _TRANSACTIONEX
DESCRIPTOR.message_types_by_name['OrderQueue'] = _ORDERQUEUE
# Build the concrete message classes from their descriptors via the
# GeneratedProtocolMessageType metaclass, then register each with the
# symbol database (_sym_db) so they can be looked up by full name.
MarketInfo = _reflection.GeneratedProtocolMessageType('MarketInfo', (_message.Message,), dict(
DESCRIPTOR = _MARKETINFO,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketInfo)
))
_sym_db.RegisterMessage(MarketInfo)
MarketList = _reflection.GeneratedProtocolMessageType('MarketList', (_message.Message,), dict(
DESCRIPTOR = _MARKETLIST,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketList)
))
_sym_db.RegisterMessage(MarketList)
CodeTableReq = _reflection.GeneratedProtocolMessageType('CodeTableReq', (_message.Message,), dict(
DESCRIPTOR = _CODETABLEREQ,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.CodeTableReq)
))
_sym_db.RegisterMessage(CodeTableReq)
CodeTableChange = _reflection.GeneratedProtocolMessageType('CodeTableChange', (_message.Message,), dict(
DESCRIPTOR = _CODETABLECHANGE,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.CodeTableChange)
))
_sym_db.RegisterMessage(CodeTableChange)
SecurityCode = _reflection.GeneratedProtocolMessageType('SecurityCode', (_message.Message,), dict(
DESCRIPTOR = _SECURITYCODE,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.SecurityCode)
))
_sym_db.RegisterMessage(SecurityCode)
SecurityCodeResp = _reflection.GeneratedProtocolMessageType('SecurityCodeResp', (_message.Message,), dict(
DESCRIPTOR = _SECURITYCODERESP,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.SecurityCodeResp)
))
_sym_db.RegisterMessage(SecurityCodeResp)
MarketDataReqByMdType = _reflection.GeneratedProtocolMessageType('MarketDataReqByMdType', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQBYMDTYPE,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketDataReqByMdType)
))
_sym_db.RegisterMessage(MarketDataReqByMdType)
MarketDataReqByIdnum = _reflection.GeneratedProtocolMessageType('MarketDataReqByIdnum', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQBYIDNUM,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketDataReqByIdnum)
))
_sym_db.RegisterMessage(MarketDataReqByIdnum)
MarketData = _reflection.GeneratedProtocolMessageType('MarketData', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATA,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketData)
))
_sym_db.RegisterMessage(MarketData)
IndexData = _reflection.GeneratedProtocolMessageType('IndexData', (_message.Message,), dict(
DESCRIPTOR = _INDEXDATA,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.IndexData)
))
_sym_db.RegisterMessage(IndexData)
MarketDataFutures = _reflection.GeneratedProtocolMessageType('MarketDataFutures', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAFUTURES,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.MarketDataFutures)
))
_sym_db.RegisterMessage(MarketDataFutures)
Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), dict(
DESCRIPTOR = _TRANSACTION,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.Transaction)
))
_sym_db.RegisterMessage(Transaction)
TransactionExItem = _reflection.GeneratedProtocolMessageType('TransactionExItem', (_message.Message,), dict(
DESCRIPTOR = _TRANSACTIONEXITEM,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.TransactionExItem)
))
_sym_db.RegisterMessage(TransactionExItem)
TransactionEx = _reflection.GeneratedProtocolMessageType('TransactionEx', (_message.Message,), dict(
DESCRIPTOR = _TRANSACTIONEX,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.TransactionEx)
))
_sym_db.RegisterMessage(TransactionEx)
OrderQueue = _reflection.GeneratedProtocolMessageType('OrderQueue', (_message.Message,), dict(
DESCRIPTOR = _ORDERQUEUE,
__module__ = 'quota_pb2'
# @@protoc_insertion_point(class_scope:ly.quota.OrderQueue)
))
_sym_db.RegisterMessage(OrderQueue)
# Re-apply the serialized FieldOptions bytes '\020\001' (presumably
# [packed=true]) on the repeated depth fields after class generation —
# a standard protoc fix-up step for this generator version.
_MARKETDATA.fields_by_name['ask_price'].has_options = True
_MARKETDATA.fields_by_name['ask_price']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATA.fields_by_name['ask_vol'].has_options = True
_MARKETDATA.fields_by_name['ask_vol']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATA.fields_by_name['bid_price'].has_options = True
_MARKETDATA.fields_by_name['bid_price']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATA.fields_by_name['bid_vol'].has_options = True
_MARKETDATA.fields_by_name['bid_vol']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATAFUTURES.fields_by_name['ask_price'].has_options = True
_MARKETDATAFUTURES.fields_by_name['ask_price']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATAFUTURES.fields_by_name['ask_vol'].has_options = True
_MARKETDATAFUTURES.fields_by_name['ask_vol']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATAFUTURES.fields_by_name['bid_price'].has_options = True
_MARKETDATAFUTURES.fields_by_name['bid_price']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_MARKETDATAFUTURES.fields_by_name['bid_vol'].has_options = True
_MARKETDATAFUTURES.fields_by_name['bid_vol']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| 43.085827
| 4,792
| 0.728248
| 7,408
| 54,719
| 5.112851
| 0.044546
| 0.071602
| 0.032474
| 0.048712
| 0.831318
| 0.752746
| 0.717631
| 0.700523
| 0.687295
| 0.683388
| 0
| 0.050577
| 0.140024
| 54,719
| 1,269
| 4,793
| 43.119779
| 0.754319
| 0.019591
| 0
| 0.70938
| 1
| 0.000838
| 0.193541
| 0.155425
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005025
| 0
| 0.005025
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fdb1e61928e3dc7743483c423fb65c045dbb2ecc
| 8,552
|
py
|
Python
|
src/tsar_test_add_NN.py
|
gentry-atkinson/tsar
|
cb7ae0ef8e2d45169e6cdee7d941a08b694b6292
|
[
"CC-BY-4.0"
] | null | null | null |
src/tsar_test_add_NN.py
|
gentry-atkinson/tsar
|
cb7ae0ef8e2d45169e6cdee7d941a08b694b6292
|
[
"CC-BY-4.0"
] | null | null | null |
src/tsar_test_add_NN.py
|
gentry-atkinson/tsar
|
cb7ae0ef8e2d45169e6cdee7d941a08b694b6292
|
[
"CC-BY-4.0"
] | null | null | null |
#Author: Gentry Atkinson
#Organization: Texas University
#Data: 30 December, 2020
#Expand the tsar test to additional models using the features sets from "tsar test"
#
# Refactored: the four train/evaluate passes shared ~90% of their code, so the
# common steps now live in small helpers.  File paths, log format, model
# shapes and training settings are unchanged.

import datetime
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split


def _load_dataset(dir_name, file_prefix):
    """Load the noisy feature/label CSVs for one dataset and derive a cleaned
    copy by deleting the rows flagged in the *_identified_bad.csv file.

    Returns (feats_noisy, y_noisy, feats_clean, y_clean)."""
    base = "test/visualizations_for_{}_sup".format(dir_name)
    feats_noisy = np.genfromtxt("{}/{}_sup_feat.csv".format(base, file_prefix), delimiter=',')
    y_noisy = np.genfromtxt("{}/{}_shuffeled_labels.csv".format(base, file_prefix), delimiter=',', dtype='int32')
    bad_guys = np.genfromtxt("{}/{}_identified_bad.csv".format(base, file_prefix), delimiter=',')
    feats_clean = np.delete(feats_noisy, bad_guys, 0)
    y_clean = np.delete(y_noisy, bad_guys, 0)
    return feats_noisy, y_noisy, feats_clean, y_clean


def _print_shapes(display_name, feats_noisy, y_noisy, feats_clean, y_clean):
    """Echo dataset shapes to the console (same text as the original script)."""
    print("{} noisy features shape: ".format(display_name), feats_noisy.shape)
    print("{} noisy y shape: ".format(display_name), y_noisy.shape)
    print("{} clean features shape: ".format(display_name), feats_clean.shape)
    print("{} clean y shape: ".format(display_name), y_clean.shape)


def _build_model(input_shape, hidden_sizes, num_labels):
    """Build, compile and summarize a dense relu stack with a softmax head."""
    layers = [Input(shape=input_shape)]
    layers.extend(Dense(size, activation='relu') for size in hidden_sizes)
    layers.append(Dense(num_labels, activation='softmax'))
    model = Sequential(layers)
    model.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['acc'])
    model.summary()
    return model


def _fit_and_score(model, X_train, y_train, X_test, y_test):
    """Train for 20 epochs and return test accuracy (argmax over one-hot labels)."""
    model.fit(X_train, y_train, epochs=20, verbose=1)
    y_pred = model.predict(X_test)
    return accuracy_score(np.argmax(y_test, axis=-1), np.argmax(y_pred, axis=-1))


def _run_experiment(log, depth, hidden_sizes, noisy_split, clean_split, num_labels):
    """Train matching models on the noisy and cleaned splits and log accuracies."""
    noisy_X_train, noisy_X_test, noisy_y_train, noisy_y_test = noisy_split
    clean_X_train, clean_X_test, clean_y_train, clean_y_test = clean_split
    log.write("{}-layer NN results: \n".format(depth))
    noisy_model = _build_model(noisy_X_train[0].shape, hidden_sizes, num_labels)
    clean_model = _build_model(clean_X_train[0].shape, hidden_sizes, num_labels)
    noisy_acc = _fit_and_score(noisy_model, noisy_X_train, noisy_y_train, noisy_X_test, noisy_y_test)
    clean_acc = _fit_and_score(clean_model, clean_X_train, clean_y_train, clean_X_test, clean_y_test)
    log.write("Noisy: {}\n".format(noisy_acc))
    log.write("Clean: {}\n".format(clean_acc))
    # original wrote "===\n\n".format(clean_acc) — a no-op format, dropped
    log.write("===\n\n")


if __name__ == "__main__":
    # ---------------- UniMiB ----------------
    unimib = _load_dataset("unimib", "UniMiB")
    _print_shapes("Unimib", *unimib)
    num_labels = unimib[1].shape[1]

    log = open("data_cleaning_experiments_results.txt", 'a+')
    log.write("{}\n".format(datetime.datetime.now()))
    log.write("------- UniMiB NN Experiments-----------\n\n")
    # shuffle=False keeps the split deterministic across runs
    noisy_split = train_test_split(unimib[0], unimib[1], test_size=0.2, shuffle=False)
    clean_split = train_test_split(unimib[2], unimib[3], test_size=0.2, shuffle=False)
    _run_experiment(log, 3, [128, 64], noisy_split, clean_split, num_labels)
    _run_experiment(log, 6, [128, 128, 64, 64, 32], noisy_split, clean_split, num_labels)
    log.flush()

    # ---------------- UCI ----------------
    uci = _load_dataset("uci", "UCI")
    _print_shapes("UCI", *uci)
    num_labels = uci[1].shape[1]

    log.write("------- UCI NN Experiments-----------\n\n")
    noisy_split = train_test_split(uci[0], uci[1], test_size=0.2, shuffle=False)
    clean_split = train_test_split(uci[2], uci[3], test_size=0.2, shuffle=False)
    _run_experiment(log, 3, [128, 64], noisy_split, clean_split, num_labels)
    _run_experiment(log, 6, [128, 128, 64, 64, 32], noisy_split, clean_split, num_labels)
    log.flush()
    log.close()
| 45.248677
| 145
| 0.702409
| 1,223
| 8,552
| 4.611611
| 0.092396
| 0.069504
| 0.094326
| 0.046809
| 0.813475
| 0.77766
| 0.764539
| 0.753191
| 0.737234
| 0.701773
| 0
| 0.02013
| 0.151894
| 8,552
| 188
| 146
| 45.489362
| 0.75748
| 0.029701
| 0
| 0.76129
| 0
| 0
| 0.159431
| 0.074168
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045161
| 0
| 0.045161
| 0.051613
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e359e4fb6f2c72beba5be6e2a26a31fcda0bcfd4
| 595
|
py
|
Python
|
HMS/Hospital/apps.py
|
Arshad360/Hospital-Management-System-Cse327-Projectr
|
707ab80d021f8a2d10e28b25dd8df5aea512787a
|
[
"MIT"
] | 2
|
2021-02-10T18:10:30.000Z
|
2021-04-27T18:07:51.000Z
|
HMS/Hospital/apps.py
|
Arshad360/Hospital-Management-System-Cse327-Projectr
|
707ab80d021f8a2d10e28b25dd8df5aea512787a
|
[
"MIT"
] | 1
|
2020-09-23T19:00:54.000Z
|
2020-09-23T19:04:22.000Z
|
HMS/Hospital/apps.py
|
Arshad360/Hospital-Management-System-Cse327-Projectr
|
707ab80d021f8a2d10e28b25dd8df5aea512787a
|
[
"MIT"
] | 1
|
2021-05-02T17:11:33.000Z
|
2021-05-02T17:11:33.000Z
|
from django.apps import AppConfig


class HospitalConfig(AppConfig):
    # Django app configuration for the Hospital app.
    name = 'Hospital'


class loginConfiq(AppConfig):
    # NOTE(review): points at 'Pharmacy' like the classes below — looks like
    # copy-paste; confirm the intended app label.
    name = 'Pharmacy'


class PharmacyConfiq(AppConfig):
    # Django app configuration for the Pharmacy app.  The original module
    # re-defined this identical class eight times (only the last binding
    # survives) and ended with a stray '=======' merge-conflict marker that
    # made the module unimportable; the duplicates and the marker are removed.
    name = 'Pharmacy'


class Confiq(AppConfig):
    name = 'Pharmacy'
| 16.081081
| 33
| 0.702521
| 55
| 595
| 7.6
| 0.236364
| 0.311005
| 0.452153
| 0.497608
| 0.782297
| 0.782297
| 0.782297
| 0.782297
| 0.782297
| 0.624402
| 0
| 0
| 0.183193
| 595
| 36
| 34
| 16.527778
| 0.860082
| 0
| 0
| 0.727273
| 0
| 0
| 0.13468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045455
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8b6c0c8308e1bd22ac4151aed290b7134c88ce41
| 10,346
|
py
|
Python
|
tests/automation_framework/tests/work_order_tests/test_submit_getresult.py
|
anjalirx-intel/avalon
|
5efd20612948a324b8a393bfe22872aeb8527097
|
[
"Apache-2.0"
] | null | null | null |
tests/automation_framework/tests/work_order_tests/test_submit_getresult.py
|
anjalirx-intel/avalon
|
5efd20612948a324b8a393bfe22872aeb8527097
|
[
"Apache-2.0"
] | null | null | null |
tests/automation_framework/tests/work_order_tests/test_submit_getresult.py
|
anjalirx-intel/avalon
|
5efd20612948a324b8a393bfe22872aeb8527097
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
import time

import pytest

import env
from src.libs.avalon_test_wrapper \
    import read_json, submit_request
from src.libs.test_base import AvalonBase
# NOTE(review): validate_response_code was called (emptyparameter /
# unknownparameter tests) but never imported — a NameError at runtime.  It is
# assumed to live in verification_utils next to check_negative_test_responses;
# confirm the module.
from src.utilities.verification_utils \
    import verify_test, check_negative_test_responses, \
    validate_response_code
from src.utilities.worker_utilities import ResultStatus
from src.utilities.worker_utilities import GetResultWaitTime
logger = logging.getLogger(__name__)
class TestClass():
    """Work-order get-result API tests.

    Every test builds a request from a JSON input file, submits it and checks
    either the success path (verify_test) or an expected error message /
    response code.  The shared build/submit/log sequence lives in the private
    helpers below; public test names and pytest marks are unchanged.
    """
    test_obj = AvalonBase()

    def _submit_getresult(self, request_filename):
        """Build a get-result request from *request_filename* (relative to
        env.work_order_input_file), submit it and return the raw response."""
        request_file = os.path.join(
            env.work_order_input_file,
            request_filename)
        err_cd = \
            self.test_obj.setup_and_build_request_wo_getresult(
                read_json(request_file))
        submit_response = submit_request(
            self.test_obj.uri_client,
            self.test_obj.build_request_output['request_obj'],
            env.wo_submit_output_json_file_name,
            read_json(request_file))
        logger.info("submit_response: \n%s\n", submit_response)
        return submit_response

    def _assert_error_message(self, submit_response, expected_message):
        """Assert the response was rejected with *expected_message*."""
        assert (
            check_negative_test_responses(
                submit_response,
                expected_message)
            is ResultStatus.SUCCESS.value)
        logger.info('\t\t!!! Test completed !!!\n\n')

    def _assert_response_code(self, submit_response, expected_code):
        """Assert the response carries JSON-RPC error code *expected_code*."""
        assert (validate_response_code(submit_response, expected_code)
                is ResultStatus.SUCCESS.value)
        logger.info('\t\t!!! Test completed !!!\n\n')

    @pytest.mark.workordergetresult
    @pytest.mark.listener
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.positive
    def test_workordergetresult_success(self):
        # test id 18702
        submit_response = self._submit_getresult("work_order_getresult.json")
        assert (
            verify_test(
                submit_response, 0,
                self.test_obj.build_request_output['pre_test_output'],
                self.test_obj.build_request_output['action_obj'])
            is ResultStatus.SUCCESS.value)
        logger.info('\t\t!!! Test completed !!!\n\n')

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_different(self):
        # test id 18873
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_different.json"),
            "Invalid work order Id")

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_specialchar(self):
        # test id 18874
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_specialchar.json"),
            "Invalid work order Id")

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_null(self):
        # test id 18875
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_null.json"),
            "Invalid work order Id")

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_nonhexstring(self):
        # test id 18876
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_nonhexstring.json"),
            "Work order Id not found in the database. Hence invalid parameter")

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_alphabetsonly(self):
        # test id 18877
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_alphabetsonly.json"),
            "Invalid work order Id")

    @pytest.mark.workordergetresult
    @pytest.mark.sdk
    @pytest.mark.fabric
    @pytest.mark.ethereum
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderid_withoutquotes(self):
        # test id 18878
        self._assert_error_message(
            self._submit_getresult(
                "workordergetresult_workorderid_withoutquotes.json"),
            "Invalid params")

    @pytest.mark.workordergetresult
    @pytest.mark.negative
    def test_workordergetresult_emptyparameter(self):
        # test id 20322
        self._assert_response_code(
            self._submit_getresult(
                "workordergetresult_emptyparameter.json"),
            2)

    @pytest.mark.workordergetresult
    @pytest.mark.negative
    def test_workordergetresult_unknownparameter(self):
        # test id 18879
        self._assert_response_code(
            self._submit_getresult(
                "workordergetresult_unknownparameter.json"),
            2)

    @pytest.mark.workordergetresult
    @pytest.mark.listener
    @pytest.mark.negative
    def test_workordergetresult_workorderId_empty(self):
        # test id 18729 — posts the raw JSON message instead of going through
        # the request-builder helper, matching the original flow.
        request_file = os.path.join(
            env.work_order_input_file,
            "workordergetresult_workorderId_empty.json")
        msg_response = self.test_obj.post_json_msg(request_file)
        logger.info("**********Received Response*********\n%s\n", msg_response)
        self._assert_error_message(msg_response, "Invalid work order Id")
| 33.482201
| 87
| 0.634351
| 1,163
| 10,346
| 5.301806
| 0.092863
| 0.079468
| 0.053519
| 0.055465
| 0.85339
| 0.85339
| 0.825495
| 0.825495
| 0.818034
| 0.818034
| 0
| 0.007075
| 0.275952
| 10,346
| 308
| 88
| 33.590909
| 0.816046
| 0
| 0
| 0.778656
| 0
| 0
| 0.128359
| 0.043012
| 0
| 0
| 0
| 0
| 0.039526
| 1
| 0.039526
| false
| 0
| 0.043478
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
47b4a5878e1e64f755a3eee761157c514aaf5678
| 106
|
py
|
Python
|
couch/models/__init__.py
|
yssk22/gaecouch
|
1fc372ca6393c14904a318b04617457d0b5257c2
|
[
"Apache-2.0"
] | 4
|
2015-03-31T05:48:29.000Z
|
2018-07-24T01:40:00.000Z
|
couch/models/__init__.py
|
yssk22/gaecouch
|
1fc372ca6393c14904a318b04617457d0b5257c2
|
[
"Apache-2.0"
] | null | null | null |
couch/models/__init__.py
|
yssk22/gaecouch
|
1fc372ca6393c14904a318b04617457d0b5257c2
|
[
"Apache-2.0"
] | null | null | null |
# Package-level re-exports: expose the database, document and util model
# symbols directly at ``couch.models`` so callers can import them from the
# package root.  Star imports are intentional here (package __init__ facade).
from couch.models.database import *
from couch.models.document import *
from couch.models.util import *
| 17.666667
| 35
| 0.783019
| 15
| 106
| 5.533333
| 0.466667
| 0.325301
| 0.542169
| 0.506024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 5
| 36
| 21.2
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
47ccadbf071419b8db62bb5369214a6ee325f7c4
| 178
|
py
|
Python
|
org_desy_grid_virt_sort_release.py
|
osynge/grid_version_sort
|
c8e5d1af8f67034b58d2ec419727ea2aebe13b31
|
[
"MIT"
] | null | null | null |
org_desy_grid_virt_sort_release.py
|
osynge/grid_version_sort
|
c8e5d1af8f67034b58d2ec419727ea2aebe13b31
|
[
"MIT"
] | null | null | null |
org_desy_grid_virt_sort_release.py
|
osynge/grid_version_sort
|
c8e5d1af8f67034b58d2ec419727ea2aebe13b31
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Thin entry-point shim: delegate straight to the packaged implementation.
from grid_virt_sort_release import org_desy_grid_virt_sort_release

if __name__ == "__main__":
    org_desy_grid_virt_sort_release.main()
| 29.666667
| 65
| 0.837079
| 29
| 178
| 4.310345
| 0.482759
| 0.256
| 0.384
| 0.608
| 0.72
| 0.72
| 0.72
| 0.72
| 0.72
| 0.72
| 0
| 0
| 0.078652
| 178
| 5
| 66
| 35.6
| 0.762195
| 0.11236
| 0
| 0
| 0
| 0
| 0.050955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 12
|
9a1bf26d6a49f1a0c338b02c9d8f26c8d5d66f8e
| 3,686
|
py
|
Python
|
tests/test_em_structures.py
|
stufisher/ispyb-api
|
c8e488274d565abd8387a485a3b5b80e1b2cedc9
|
[
"Apache-2.0"
] | 3
|
2019-07-17T22:09:40.000Z
|
2021-12-08T23:42:26.000Z
|
tests/test_em_structures.py
|
stufisher/ispyb-api
|
c8e488274d565abd8387a485a3b5b80e1b2cedc9
|
[
"Apache-2.0"
] | 137
|
2017-10-09T09:30:23.000Z
|
2022-03-31T11:31:12.000Z
|
tests/test_em_structures.py
|
stufisher/ispyb-api
|
c8e488274d565abd8387a485a3b5b80e1b2cedc9
|
[
"Apache-2.0"
] | 9
|
2016-10-06T14:58:06.000Z
|
2021-02-18T06:38:37.000Z
|
def _insert_movie(testdb):
    """Insert a data-collection group, a data collection and one movie.

    This setup sequence was duplicated verbatim in all four tests; it is the
    shared fixture chain.  Returns (emacquisition, movie_id)."""
    emacquisition = testdb.em_acquisition
    group_params = emacquisition.get_data_collection_group_params()
    group_params["parentid"] = 55168  # fixed parent session used by the test DB
    group_id = emacquisition.insert_data_collection_group(list(group_params.values()))
    collection_params = emacquisition.get_data_collection_params()
    collection_params["parentid"] = group_id
    dc_id = emacquisition.insert_data_collection(list(collection_params.values()))
    params = emacquisition.get_movie_params()
    params["dataCollectionId"] = dc_id
    params["movieNumber"] = 1
    params["positionX"] = 4.05
    params["positionY"] = 8.01
    movie_id = emacquisition.insert_movie(list(params.values()))
    return emacquisition, movie_id


def _insert_motion_correction(testdb):
    """Insert a movie plus one motion-correction row; return (emacquisition, motion_cor_id)."""
    emacquisition, movie_id = _insert_movie(testdb)
    motion_cor_id = emacquisition.insert_motion_correction(
        movie_id=movie_id, dose_per_frame=20
    )
    return emacquisition, motion_cor_id


def test_insert_movie(testdb):
    """A movie insert returns a positive, non-None id."""
    _, movie_id = _insert_movie(testdb)
    assert movie_id is not None
    assert movie_id > 0


def test_insert_motion_correction(testdb):
    """A motion-correction insert on a fresh movie returns a truthy id."""
    _, motion_cor_id = _insert_motion_correction(testdb)
    assert motion_cor_id


def test_insert_ctf(testdb):
    """A CTF row can be attached to a motion-correction row."""
    emacquisition, motion_cor_id = _insert_motion_correction(testdb)
    ctf_id = emacquisition.insert_ctf(motion_correction_id=motion_cor_id)
    assert ctf_id


def test_insert_drift(testdb):
    """A drift row can be attached to a motion-correction row."""
    emacquisition, motion_cor_id = _insert_motion_correction(testdb)
    drift_params = emacquisition.get_motion_correction_drift_params()
    drift_params["motionCorrectionId"] = motion_cor_id
    drift_params["frameNumber"] = 12
    drift_params["deltaX"] = 5
    drift_params["deltaY"] = 6
    drift_id = emacquisition.insert_motion_correction_drift(list(drift_params.values()))
    assert drift_id is not None
| 38.8
| 88
| 0.754205
| 445
| 3,686
| 5.867416
| 0.110112
| 0.097664
| 0.136729
| 0.079663
| 0.843738
| 0.829567
| 0.829567
| 0.829567
| 0.829567
| 0.829567
| 0
| 0.018766
| 0.147043
| 3,686
| 94
| 89
| 39.212766
| 0.811705
| 0
| 0
| 0.753247
| 0
| 0
| 0.07732
| 0
| 0
| 0
| 0
| 0
| 0.064935
| 1
| 0.051948
| false
| 0
| 0
| 0
| 0.051948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d006631d58a86f980e920ecaae570ec2e2a76c77
| 18,278
|
py
|
Python
|
trackme/bin/trackme_rest_handler_maintenance.py
|
dritanbitincka/trackme
|
5b2e1eb6b8d46619d2ba053d985647410253e656
|
[
"Apache-2.0"
] | 1
|
2021-06-06T11:51:36.000Z
|
2021-06-06T11:51:36.000Z
|
trackme/bin/trackme_rest_handler_maintenance.py
|
duanshuaimin/trackme
|
78d0ec64e3ae2e40878b282ba2f375f978a28d73
|
[
"Apache-2.0"
] | null | null | null |
trackme/bin/trackme_rest_handler_maintenance.py
|
duanshuaimin/trackme
|
78d0ec64e3ae2e40878b282ba2f375f978a28d73
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os, sys
import splunk
import splunk.entity
import splunk.Intersplunk
import json
logger = logging.getLogger(__name__)
splunkhome = os.environ['SPLUNK_HOME']
sys.path.append(os.path.join(splunkhome, 'etc', 'apps', 'trackme', 'lib'))
import rest_handler
import splunklib.client as client
class TrackMeHandlerMaintenance_v1(rest_handler.RESTHandler):
    def __init__(self, command_line, command_arg):
        # Delegate to the base REST handler, passing this module's logger so
        # log records are attributed to the maintenance handler.
        super(TrackMeHandlerMaintenance_v1, self).__init__(command_line, command_arg, logger)
# Get the entire data sources collection as a Python array
def get_maintenance_status(self, request_info, **kwargs):
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
else:
# body is not required in this endpoint, if not submitted do not describe the usage
describe = False
if describe:
response = "{\"describe\": \"This endpoint retrieves the current maintenance mode collection returned as a JSON array, it requires a GET call with no data required\"}"\
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_maintenance_mode"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Render
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Enable the maintenance mode
def post_maintenance_enable(self, request_info, **kwargs):
# Declare
maintenance_mode_start = None
maintenance_mode_end = None
maintenance_duration = None
maintenance_mode = "enabled"
update_comment = "API update"
import time
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = []
# Get start and end maintenance, both are optionals
# maintenance_mode_start
try:
maintenance_mode_start = int(resp_dict['maintenance_mode_start'])
except Exception as e:
maintenance_mode_start = 0
# maintenance_mode_end
try:
maintenance_mode_end = int(resp_dict['maintenance_mode_end'])
except Exception as e:
maintenance_mode_end = 0
# maintenance_duration
try:
maintenance_duration = int(resp_dict['maintenance_duration'])
except Exception as e:
maintenance_duration = 0
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
else:
# body is not required in this endpoint
describe = False
if describe:
response = "{\"describe\": \"This endpoint enables the maintenance mode, it requires a POST call with the following information:\""\
+ ", \"options\" : [ { "\
+ "\"maintenance_duration\": \"(integer) OPTIONAL: the duration of the maintenance window in seconds, if unspecified and maintenance_mode_end is not specified either, defaults to now plus 24 hours\", "\
+ "\"maintenance_mode_end\": \"(integer) OPTIONAL: the date time in epochtime format for the end of the maintenance window, it is overriden by maintenance_duration if specified, defaults to now plus 24 hours if not specified and maintenance_duration is not specified\", "\
+ "\"maintenance_mode_start\": \"(integer) OPTIONAL: the date time in epochtime format for the start of the maintennce window, defaults to now if not specified\", "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Calculates start and end
time_updated = round(time.time())
# if maintenance start is not specified, starts at now
if (maintenance_mode_start is None) or (maintenance_mode_start <= 0):
maintenance_mode_start = str(round(time_updated))
# if maintenance end is not specified, and maintenance duration is not specified either, defaults to now + 24 hours
if (maintenance_mode_end is None) or (maintenance_mode_end <= 0):
maintenance_mode_end = str(round(time.time() + 86400))
# if maintenance duration is specified, it overrides the maintenance end whenever it is specified or not
if (maintenance_duration is not None) and (maintenance_duration > 0):
maintenance_mode_end = str(round(time.time() + maintenance_duration))
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_maintenance_mode"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# Get the current record
# Notes: the record is returned as an array, as we search for a specific record, we expect one record only
try:
record = collection.data.query()
key = record[0].get('_key')
except Exception as e:
key = None
# Render result
if key is not None and len(key)>2:
# Update the record
collection.data.update(str(key), json.dumps({"maintenance_mode": str(maintenance_mode),
"time_updated": str(time_updated),
"maintenance_mode_start": str(maintenance_mode_start),
"maintenance_mode_end": str(maintenance_mode_end)}))
# Record an audit change
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "enable maintenance mode",
"object": "all",
"object_category": "all",
"object_attrs": str(json.dumps(collection.data.query_by_id(key), indent=1)),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
else:
# Insert the record
collection.data.insert(json.dumps({"maintenance_mode": str(maintenance_mode),
"time_updated": str(time_updated),
"maintenance_mode_start": str(maintenance_mode_start),
"maintenance_mode_end": str(maintenance_mode_end)}))
# Record an audit change
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "enable maintenance mode",
"object": "all",
"object_category": "all",
"object_attrs": json.dumps({"maintenance_mode": str(maintenance_mode), "time_updated": str(time_updated), "maintenance_mode_start": str(maintenance_mode_start), "maintenance_mode_end": str(maintenance_mode_end)}, index=1),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Render
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Disable the maintenance mode
def post_maintenance_disable(self, request_info, **kwargs):
import time
# Declare
maintenance_mode_start = "N/A"
maintenance_mode_end = "N/A"
maintenance_mode = "disabled"
update_comment = "API update"
describe = False
# Retrieve from data
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = None
if resp_dict is not None:
try:
describe = resp_dict['describe']
if describe in ("true", "True"):
describe = True
except Exception as e:
describe = False
if not describe:
try:
resp_dict = json.loads(str(request_info.raw_args['payload']))
except Exception as e:
resp_dict = []
# Update comment is optional and used for audit changes
try:
update_comment = resp_dict['update_comment']
except Exception as e:
update_comment = "API update"
else:
# body is not required in this endpoint
describe = False
if describe:
response = "{\"describe\": \"This endpoint disables the maintenance mode, it requires a POST call with the following information:\""\
+ ", \"options\" : [ { "\
+ "\"update_comment\": \"OPTIONAL: a comment for the update, comments are added to the audit record, if unset will be defined to: API update\""\
+ " } ] }"
return {
"payload": json.dumps(json.loads(str(response)), indent=1),
'status': 200 # HTTP status code
}
# Get splunkd port
entity = splunk.entity.getEntity('/server', 'settings',
namespace='trackme', sessionKey=request_info.session_key, owner='-')
splunkd_port = entity['mgmtHostPort']
try:
collection_name = "kv_trackme_maintenance_mode"
service = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection = service.kvstore[collection_name]
# Audit collection
collection_name_audit = "kv_trackme_audit_changes"
service_audit = client.connect(
owner="nobody",
app="trackme",
port=splunkd_port,
token=request_info.session_key
)
collection_audit = service_audit.kvstore[collection_name_audit]
# Get the current record
# Notes: the record is returned as an array, as we search for a specific record, we expect one record only
try:
record = collection.data.query()
key = record[0].get('_key')
except Exception as e:
key = None
# An ack record exists already in the collection, perform an update
time_updated = round(time.time())
# Render result
if key is not None and len(key)>2:
# Update the record
collection.data.update(str(key), json.dumps({"maintenance_mode": str(maintenance_mode),
"time_updated": str(time_updated),
"maintenance_mode_start": str(maintenance_mode_start),
"maintenance_mode_end": str(maintenance_mode_end)}))
# Record an audit change
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "disable maintenance mode",
"object": "all",
"object_category": "all",
"object_attrs": str(json.dumps(collection.data.query_by_id(key), indent=1)),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
else:
# Insert the record
collection.data.insert(json.dumps({"maintenance_mode": str(maintenance_mode),
"time_updated": str(time_updated),
"maintenance_mode_start": str(maintenance_mode_start),
"maintenance_mode_end": str(maintenance_mode_end)}))
# Record an audit change
current_time = int(round(time.time() * 1000))
user = request_info.user
try:
# Insert the record
collection_audit.data.insert(json.dumps({
"time": str(current_time),
"user": str(user),
"action": "success",
"change_type": "disable maintenance mode",
"object": "all",
"object_category": "all",
"object_attrs": json.dumps({"maintenance_mode": str(maintenance_mode), "time_updated": str(time_updated), "maintenance_mode_start": str(maintenance_mode_start), "maintenance_mode_end": str(maintenance_mode_end)}, index=1),
"result": "N/A",
"comment": str(update_comment)
}))
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
# Render
return {
"payload": json.dumps(collection.data.query(), indent=1),
'status': 200 # HTTP status code
}
except Exception as e:
return {
'payload': 'Warn: exception encountered: ' + str(e) # Payload of the request.
}
| 39.223176
| 288
| 0.519477
| 1,771
| 18,278
| 5.191982
| 0.119706
| 0.117455
| 0.046982
| 0.043067
| 0.808265
| 0.788907
| 0.763893
| 0.758238
| 0.730832
| 0.730832
| 0
| 0.006766
| 0.393533
| 18,278
| 465
| 289
| 39.307527
| 0.822733
| 0.101652
| 0
| 0.806854
| 0
| 0
| 0.113781
| 0.017303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012461
| false
| 0
| 0.031153
| 0
| 0.093458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d01e89135388afabff283ba2f34fd6515ada9eac
| 36,187
|
py
|
Python
|
tests/job/previous_api.py
|
alan-turing-institute/gateway-middleware-old
|
f09f6099f33f66ec95c4b24c0ae41eebfb68875c
|
[
"MIT"
] | 3
|
2017-08-03T07:40:08.000Z
|
2019-07-29T11:39:52.000Z
|
tests/job/previous_api.py
|
alan-turing-institute/gateway-middleware-old
|
f09f6099f33f66ec95c4b24c0ae41eebfb68875c
|
[
"MIT"
] | 58
|
2017-06-22T15:02:53.000Z
|
2018-01-08T16:06:01.000Z
|
tests/job/previous_api.py
|
alan-turing-institute/gateway-middleware-old
|
f09f6099f33f66ec95c4b24c0ae41eebfb68875c
|
[
"MIT"
] | null | null | null |
import json
import pytest
import unittest
import unittest.mock as mock
from werkzeug.exceptions import NotFound
from middleware.job.api import JobApi
from middleware.job.inmemory_repository import JobRepositoryMemory
from middleware.factory import create_app
CONFIG_NAME = "test"
@pytest.fixture
def test_client(job_repository=None):
    """Build a Flask test client wired to the given job repository.

    BUGFIX: the default was previously ``job_repository=JobRepositoryMemory()``,
    a mutable default evaluated once at definition time — every bare
    ``test_client()`` call shared the same repository instance, leaking job
    state between tests. A ``None`` sentinel gives each call a fresh one.
    """
    if job_repository is None:
        job_repository = JobRepositoryMemory()
    app = create_app(CONFIG_NAME, job_repository)
    return app.test_client()
def response_to_json(response):
    """Decode a test-client response body as JSON.

    Returns None when the body is empty (e.g. a 204 response),
    otherwise the parsed JSON value.
    """
    body = response.get_data(as_text=True)
    return json.loads(body) if body else None
class TestJobApi(unittest.TestCase):
    """Tests for the single-job endpoint /api/job/<job_id>.

    Covers GET (read), PUT (full update), DELETE, POST (update-by-id) and
    PATCH (merge-patch partial update) against an in-memory repository.
    """

    def test_abort_if_not_found_throws_notfound_exception(self):
        jobs = JobRepositoryMemory()
        api = JobApi(job_repository=jobs)
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        with pytest.raises(NotFound):
            api.abort_if_not_found(job_id)

    # === GET tests (READ) ===
    def test_get_for_existing_job_returns_job_with_200_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id,
               "parameters": {"height": 3, "width": 4, "depth": 5}}
        jobs.create(job)
        client = test_client(jobs)
        job_response = client.get("/api/job/{}".format(job_id))
        assert job_response.status_code == 200
        assert response_to_json(job_response) == job

    def test_get_for_nonexistent_job_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_response = client.get("/api/job/{}".format(job_id))
        error_message = {"message": "Job {} not found".format(job_id)}
        assert job_response.status_code == 404
        assert response_to_json(job_response) == error_message

    def test_get_with_no_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_response = client.get("/api/job/")
        assert job_response.status_code == 404
        # No content check as we are expecting the standard 404 error message
        # TODO: Get the 404 response defined for the app and compare it here

    # === PUT tests (UPDATE) ===
    def test_put_with_no_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_response = client.put("/api/job/")
        assert job_response.status_code == 404
        # No content check as we are expecting the standard 404 error message
        # TODO: Get the 404 response defined for the app and compare it here
        assert len(jobs._jobs) == 0

    def test_put_with_empty_body_returns_error_with_400_status(self):
        jobs = JobRepositoryMemory()
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3,
                                            "width": 4, "depth": 5}}
        jobs.create(job)
        job = None
        client = test_client(jobs)
        job_response = client.put("/api/job/{}".format(job_id),
                                  data=json.dumps(job),
                                  content_type='application/json')
        error_message = {"message": "Message body could not be parsed as JSON"}
        assert job_response.status_code == 400
        assert response_to_json(job_response) == error_message

    def test_put_with_nonjson_body_returns_error_with_400_status(self):
        jobs = JobRepositoryMemory()
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3,
                                            "width": 4, "depth": 5}}
        jobs.create(job)
        invalid_json = "{key-with-no-value}"
        client = test_client(jobs)
        # We don't add content_type='application/json' because, if we do the
        # framework catches invalid JSON before it gets to our response handler
        job_response = client.put("/api/job/{}".format(job_id),
                                  data=invalid_json)
        error_message = {"message": "Message body could not be parsed as JSON"}
        assert job_response.status_code == 400
        assert response_to_json(job_response) == error_message

    def test_put_with_mismatched_job_id_returns_error_with_409_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id_url = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_existing = {"id": job_id_url, "parameters": {"height": 3,
                                                         "width": 4, "depth": 5}}
        jobs.create(job_existing)
        job_id_json = "59540b31-0454-4875-a00f-94eb4d81a09c"
        job_new = {"id": job_id_json, "parameters": {"blue":
                                                     "high", "green": "low"}}
        client = test_client(jobs)
        job_response = client.put("/api/job/{}".format(job_id_url),
                                  data=json.dumps(job_new),
                                  content_type='application/json')
        error_message = {"message": "Job ID in URL ({}) does not match job "
                                    "ID in message JSON ({}).".format(job_id_url,
                                                                      job_id_json)}
        assert job_response.status_code == 409
        assert response_to_json(job_response) == error_message
        # The stored job must be untouched on a mismatch
        assert jobs.get_by_id(job_id_url) == job_existing
        assert jobs.get_by_id(job_id_json) is None

    def test_put_with_nonexistent_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_response = client.put("/api/job/{}".format(job_id))
        error_message = {"message": "Job {} not found".format(job_id)}
        assert job_response.status_code == 404
        assert response_to_json(job_response) == error_message

    def test_put_with_existing_job_id_returns_new_job_with_200_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_original = {"id": job_id, "parameters": {"height": 3,
                                                     "width": 4, "depth": 5}}
        jobs.create(job_original)
        job_new = {"id": job_id, "parameters": {"blue":
                                                "high", "green": "low"}}
        client = test_client(jobs)
        job_response = client.put("/api/job/{}".format(job_id),
                                  data=json.dumps(job_new),
                                  content_type='application/json')
        assert job_response.status_code == 200
        assert response_to_json(job_response) == job_new
        assert jobs.get_by_id(job_id) == job_new

    def test_put_with_invalid_job_json_returns_error_with_400_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3, "width": 4,
                                            "depth": 5}}
        jobs.create(job)
        client = test_client(jobs)
        invalid_job = {"no-id-field": "valid-json"}
        job_response = client.put("/api/job/{}".format(job_id),
                                  data=json.dumps(invalid_job),
                                  content_type='application/json')
        error_message = {"message": "Message body is not valid Job JSON"}
        assert job_response.status_code == 400
        assert response_to_json(job_response) == error_message

    # === DELETE tests (DELETE) ===
    def test_delete_with_no_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_response = client.delete("/api/job/")
        assert job_response.status_code == 404
        # No content check as we are expecting the standard 404 error message
        # TODO: Get the 404 response defined for the app and compare it here

    def test_delete_with_nonexistent_job_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_response = client.delete("/api/job/{}".format(job_id))
        error_message = {"message": "Job {} not found".format(job_id)}
        assert job_response.status_code == 404
        assert response_to_json(job_response) == error_message

    def test_delete_with_existing_job_id_returns_new_job_with_204_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3, "width": 4,
                                            "depth": 5}}
        jobs.create(job)
        client = test_client(jobs)
        job_response = client.delete("/api/job/{}".format(job_id))
        assert job_response.status_code == 204
        # 204 has no body, and the job must be gone from the repository
        assert response_to_json(job_response) is None
        assert jobs.get_by_id(job_id) is None

    # === POST tests ===
    def test_post_with_valid_json_correct_id_returns_new_job_success_200(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/job/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        assert response_to_json(job_response) == job
        assert job_response.status_code == 200

    def test_post_with_valid_json_and_incorrect_id_returns_error_404(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        # job_id[::-1] is a reversed (hence unknown) job id
        job = {"id": job_id[::-1], "parameters": {"height": 3}}
        job_response = client.post("/api/job/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        error_message = {"message": "Job {} not found".format(job_id[::-1])}
        assert response_to_json(job_response) == error_message
        assert job_response.status_code == 404

    def test_post_with_missing_json_returns_error_400(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/job/{}".format(job_id),
                                   data=None,
                                   content_type=None)
        error_message = {"message": "Message body could not be parsed as JSON"}
        assert response_to_json(job_response) == error_message
        assert job_response.status_code == 400

    def test_post_with_invalid_json_returns_error_400(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        broken_json = {'test': 5}
        job_response = client.post("/api/job/{}".format(job_id),
                                   data=json.dumps(broken_json),
                                   content_type='application/json')
        error_message = {"message": "Message body is not valid Job JSON"}
        assert response_to_json(job_response) == error_message
        assert job_response.status_code == 400

    # === PATCH tests (Partial UPDATE) ===
    def test_patch_with_no_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_response = client.patch("/api/job/")
        assert job_response.status_code == 404
        # No content check as we are expecting the standard 404 error message
        # TODO: Get the 404 response defined for the app and compare it here
        assert len(jobs._jobs) == 0

    def test_patch_with_empty_body_returns_error_with_400_status(self):
        jobs = JobRepositoryMemory()
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3,
                                            "width": 4, "depth": 5}}
        jobs.create(job)
        job = None
        client = test_client(jobs)
        job_response = client.patch("/api/job/{}".format(job_id),
                                    data=json.dumps(job),
                                    content_type='application/json-patch+json')
        error_message = {"message": "Message body could not be parsed as JSON"}
        assert job_response.status_code == 400
        assert response_to_json(job_response) == error_message

    def test_patch_with_nonjson_body_returns_error_with_400_status(self):
        jobs = JobRepositoryMemory()
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3,
                                            "width": 4, "depth": 5}}
        jobs.create(job)
        invalid_json = "{key-with-no-value}"
        client = test_client(jobs)
        # We don't add content_type='application/json' because, if we do the
        # framework catches invalid JSON before it gets to our response handler
        job_response = client.patch("/api/job/{}".format(job_id),
                                    data=invalid_json)
        error_message = {"message": "Message body could not be parsed as JSON"}
        assert job_response.status_code == 400
        assert response_to_json(job_response) == error_message

    def test_patch_with_nonexistent_job_id_returns_error_with_404_status(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_response = client.patch("/api/job/{}".format(job_id))
        error_message = {"message": "Job {} not found".format(job_id)}
        assert job_response.status_code == 404
        assert response_to_json(job_response) == error_message

    def test_patch_with_mismatched_job_id_returns_error_with_409_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id_url = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_existing = {"id": job_id_url, "parameters": {"height": 3,
                                                         "width": 4, "depth": 5}}
        jobs.create(job_existing)
        job_id_json = "59540b31-0454-4875-a00f-94eb4d81a09c"
        job_new = {"id": job_id_json, "parameters": {"height":
                                                     7, "green": "low", "depth": None}}
        client = test_client(jobs)
        job_response = client.patch(
            "/api/job/{}".format(job_id_url),
            data=json.dumps(job_new),
            content_type='application/merge-patch+json')
        error_message = {"message": "Job ID in URL ({}) does not match job "
                                    "ID in message JSON ({}).".format(job_id_url,
                                                                      job_id_json)}
        assert job_response.status_code == 409
        assert response_to_json(job_response) == error_message
        # The stored job must be untouched on a mismatch
        assert jobs.get_by_id(job_id_url) == job_existing
        assert jobs.get_by_id(job_id_json) is None

    def test_patch_with_existing_job_id_returns_new_job_with_200_status(self):
        jobs = JobRepositoryMemory()
        # Create job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job_original = {"id": job_id, "parameters": {"height": 3,
                                                     "width": 4, "depth": 5}}
        jobs.create(job_original)
        # Merge-patch semantics: "height" overwritten, "green" added,
        # "depth": None removes the key
        job_patch = {"parameters": {"height":
                                    7, "green": "low", "depth": None}}
        job_new_expected = {"id": job_id, "parameters": {"height": 7,
                                                         "width": 4, "green": "low"}}
        client = test_client(jobs)
        job_response = client.patch(
            "/api/job/{}".format(job_id),
            data=json.dumps(job_patch),
            content_type='application/merge-patch+json')
        assert job_response.status_code == 200
        assert response_to_json(job_response) == job_new_expected
        assert jobs.get_by_id(job_id) == job_new_expected
class TestRunApi(object):
    """Endpoint tests for /api/run/<job_id>."""

    # NOTE(review): mock_run_remote, mock_patch_all and mock_transfer_all are
    # referenced by the decorators below but are not defined in the visible
    # import block — confirm they are defined elsewhere in this module.
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                '_run_remote_script', side_effect=mock_run_remote)
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                'patch_all_templates', side_effect=mock_patch_all)
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                'transfer_all_files', side_effect=mock_transfer_all)
    def test_run_with_valid_id(self, mock_transfer, mock_patch, mock_run):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create full job
        job = {
            "id": "d769843b-6f37-4939-96c7-c382c3e74b46",
            "templates": [
                {
                    "source_uri": "./resources/templates/Blue.nml",
                    "destination_path": "project/case/"
                }
            ],
            "scripts": [
                {"source_uri": "./resources/scripts/start_job.sh",
                 "destination_path": "project/case/", "action": "RUN"},
                {"source_uri": "./resources/scripts/cancel_job.sh",
                 "destination_path": "project/case/", "action": "CANCEL"},
                {"source_uri": "./resources/scripts/progress_job.sh",
                 "destination_path": "project/case/", "action": "PROGRESS"},
                {"source_uri": "./resources/scripts/setup_job.sh",
                 "destination_path": "project/case/", "action": "SETUP"}
            ],
            "parameters": {
                "viscosity_properties": {
                    "viscosity_phase_1": "42.0"
                }
            },
            "inputs": []
        }
        job_id = job['id']
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/run/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        # The mocked remote runner echoes the RUN script's name as stdout
        assert response_to_json(job_response)['stdout'] == 'start_job.sh'
        assert job_response.status_code == 200

    def test_run_with_invalid_id(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        bad_id = "2s3"
        job_response = client.post("/api/run/{}".format(bad_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        err_message = {'message': ('Job {0} not found. You have requested '
                                   'this URI [/api/run/{0}] but did'
                                   ' you mean /api/run/<string:job_id> '
                                   '?').format(bad_id)}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 404

    def test_run_with_no_json(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/run/{}".format(job_id))
        err_message = {'message': ('Message body could not be parsed as JSON')}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 400

    def test_run_with_invalid_json(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        broken_json = {'test': 5}
        job_response = client.post("/api/run/{}".format(job_id),
                                   data=json.dumps(broken_json),
                                   content_type='application/json')
        err_message = {'message': ('Message body is not valid Job JSON')}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 400
class TestSetupApi(object):
    """Endpoint tests for /api/setup/<job_id>."""

    # NOTE(review): mock_run_remote, mock_patch_all and mock_transfer_all are
    # referenced by the decorators below but are not defined in the visible
    # import block — confirm they are defined elsewhere in this module.
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                '_run_remote_script', side_effect=mock_run_remote)
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                'patch_all_templates', side_effect=mock_patch_all)
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                'transfer_all_files', side_effect=mock_transfer_all)
    def test_setup_with_valid_id(self, mock_transfer, mock_patch, mock_run):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create full job
        job = {
            "id": "d769843b-6f37-4939-96c7-c382c3e74b46",
            "templates": [
                {
                    "source_uri": "./resources/templates/Blue.nml",
                    "destination_path": "project/case/"
                }
            ],
            "scripts": [
                {"source_uri": "./resources/scripts/start_job.sh",
                 "destination_path": "project/case/", "action": "RUN"},
                {"source_uri": "./resources/scripts/cancel_job.sh",
                 "destination_path": "project/case/", "action": "CANCEL"},
                {"source_uri": "./resources/scripts/progress_job.sh",
                 "destination_path": "project/case/", "action": "PROGRESS"},
                {"source_uri": "./resources/scripts/setup_job.sh",
                 "destination_path": "project/case/", "action": "SETUP"}
            ],
            "parameters": {
                "viscosity_properties": {
                    "viscosity_phase_1": "42.0"
                }
            },
            "inputs": []
        }
        job_id = job['id']
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/setup/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        # The mocked remote runner echoes the SETUP script's name as stdout
        assert response_to_json(job_response)['stdout'] == 'setup_job.sh'
        assert job_response.status_code == 200

    def test_setup_with_invalid_id(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        bad_id = "2s3"
        job_response = client.post("/api/setup/{}".format(bad_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        err_message = {'message': ('Job {0} not found. You have requested '
                                   'this URI [/api/setup/{0}] but did '
                                   'you mean /api/setup/<string:job_id> or '
                                   '/api/run/<string:job_id> '
                                   '?').format(bad_id)}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 404

    def test_setup_with_no_json(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/setup/{}".format(job_id))
        err_message = {'message': ('Message body could not be parsed as JSON')}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 400

    def test_setup_with_invalid_json(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        broken_json = {'test': 5}
        job_response = client.post("/api/setup/{}".format(job_id),
                                   data=json.dumps(broken_json),
                                   content_type='application/json')
        err_message = {'message': ('Message body is not valid Job JSON')}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 400
class TestCancelApi(object):
    """Endpoint tests for /api/cancel/<job_id>."""

    # NOTE(review): mock_run_remote is referenced by the decorator below but
    # is not defined in the visible import block — confirm it is defined
    # elsewhere in this module.
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                '_run_remote_script', side_effect=mock_run_remote)
    def test_cancel_with_valid_id(self, mock_run):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create full job
        job = {
            "id": "d769843b-6f37-4939-96c7-c382c3e74b46",
            "templates": [
                {
                    "source_uri": "./resources/templates/Blue.nml",
                    "destination_path": "project/case/"
                }
            ],
            "scripts": [
                {"source_uri": "./resources/scripts/start_job.sh",
                 "destination_path": "project/case/", "action": "RUN"},
                {"source_uri": "./resources/scripts/cancel_job.sh",
                 "destination_path": "project/case/", "action": "CANCEL"},
                {"source_uri": "./resources/scripts/progress_job.sh",
                 "destination_path": "project/case/", "action": "PROGRESS"},
                {"source_uri": "./resources/scripts/setup_job.sh",
                 "destination_path": "project/case/", "action": "SETUP"}
            ],
            "parameters": {
                "viscosity_properties": {
                    "viscosity_phase_1": "42.0"
                }
            },
            "inputs": []
        }
        job_id = job['id']
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/cancel/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        # The mocked remote runner echoes the CANCEL script's name as stdout
        assert response_to_json(job_response)['stdout'] == 'cancel_job.sh'
        assert job_response.status_code == 200

    def test_cancel_with_invalid_id(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        bad_id = "2s3"
        job_response = client.post("/api/cancel/{}".format(bad_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        err_message = {'message': ('Job {0} not found. You have requested '
                                   'this URI [/api/cancel/{0}] but did '
                                   'you mean /api/cancel/<string:job_id> '
                                   '?').format(bad_id)}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 404
class TestProgressApi(object):
    """Endpoint tests for /api/progress/<job_id>."""

    # NOTE(review): mock_run_remote is referenced by the decorator below but
    # is not defined in the visible import block — confirm it is defined
    # elsewhere in this module.
    @mock.patch('middleware.job_information_manager.job_information_manager.'
                '_run_remote_script', side_effect=mock_run_remote)
    def test_progress_with_valid_id(self, mock_run):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create full job
        job = {
            "id": "d769843b-6f37-4939-96c7-c382c3e74b46",
            "templates": [
                {
                    "source_uri": "./resources/templates/Blue.nml",
                    "destination_path": "project/case/"
                }
            ],
            "scripts": [
                {"source_uri": "./resources/scripts/start_job.sh",
                 "destination_path": "project/case/", "action": "RUN"},
                {"source_uri": "./resources/scripts/cancel_job.sh",
                 "destination_path": "project/case/", "action": "CANCEL"},
                {"source_uri": "./resources/scripts/progress_job.sh",
                 "destination_path": "project/case/", "action": "PROGRESS"},
                {"source_uri": "./resources/scripts/setup_job.sh",
                 "destination_path": "project/case/", "action": "SETUP"}
            ],
            "parameters": {
                "viscosity_properties": {
                    "viscosity_phase_1": "42.0"
                }
            },
            "inputs": []
        }
        job_id = job['id']
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        job_response = client.post("/api/progress/{}".format(job_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        # The mocked remote runner echoes the PROGRESS script's name as stdout
        assert response_to_json(job_response)['stdout'] == 'progress_job.sh'
        assert job_response.status_code == 200

    # BUGFIX (naming): this test was copy-pasted as
    # "test_cancel_with_invalid_id" although it exercises the progress
    # endpoint; renamed so failures point at the right endpoint.
    def test_progress_with_invalid_id(self):
        jobs = JobRepositoryMemory()
        client = test_client(jobs)
        # Create skeleton job
        job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
        job = {"id": job_id, "parameters": {"height": 3}}
        client.post("/api/job", data=json.dumps(job),
                    content_type='application/json')
        bad_id = "2s3"
        job_response = client.post("/api/progress/{}".format(bad_id),
                                   data=json.dumps(job),
                                   content_type='application/json')
        err_message = {'message': ('Job {0} not found. You have requested '
                                   'this URI [/api/progress/{0}] but did '
                                   'you mean /api/progress/'
                                   '<string:job_id> ?').format(bad_id)}
        assert response_to_json(job_response) == err_message
        assert job_response.status_code == 404
class TestJobsApi(object):
# === GET tests (LIST) ===
def test_get_returns_object_summary_list(self):
jobs = JobRepositoryMemory()
# Create job
job_id_1 = "d769843b-6f37-4939-96c7-c382c3e74b46"
job_1 = {"id": job_id_1, "parameters": {"height": 11, "width": 12,
"depth": 13}}
job_id_2 = "53835db6-87cb-4dd8-a91f-5c98100c0b82"
job_2 = {"id": job_id_2, "parameters": {"height": 21, "width": 22,
"depth": 23}}
job_id_3 = "781692cc-b71c-469e-a8e9-938c2fda89f2"
job_3 = {"id": job_id_3, "parameters": {"height": 31, "width": 32,
"depth": 33}}
jobs.create(job_1)
jobs.create(job_2)
jobs.create(job_3)
client = test_client(jobs)
def job_uri(job_id):
return "/api/job/{}".format(job_id)
expected_response = [{"id": job_id_1, "uri": job_uri(job_id_1)},
{"id": job_id_2, "uri": job_uri(job_id_2)},
{"id": job_id_3, "uri": job_uri(job_id_3)}]
job_response = client.get("/api/job")
assert job_response.status_code == 200
# Both lists of dictionaries need to have same sort order to
# successfully compare
assert response_to_json(job_response).sort(key=lambda x: x["id"]) == \
expected_response.sort(key=lambda x: x["id"])
# === POST tests (CREATE) ===
def test_post_for_nonexistent_job_returns_job_with_200_status(self):
jobs = JobRepositoryMemory()
# Create job
job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
job = {"id": job_id,
"parameters": {"height": 3, "width": 4, "depth": 5}}
client = test_client(jobs)
job_response = client.post("/api/job", data=json.dumps(job),
content_type='application/json')
assert job_response.status_code == 200
assert response_to_json(job_response) == job
assert jobs.get_by_id(job_id) == job
def test_post_for_existing_job_returns_error_with_409_status(self):
jobs = JobRepositoryMemory()
# Create job
job_id = "d769843b-6f37-4939-96c7-c382c3e74b46"
job_existing = {"id": job_id, "parameters": {"height": 3, "width": 4,
"depth": 5}}
jobs.create(job_existing)
job_new = {"id": job_id, "parameters": {"blue": "high",
"green": "low"}}
client = test_client(jobs)
job_response = client.post("/api/job", data=json.dumps(job_new),
content_type='application/json')
error_message = {"message": "Job with ID {} already "
"exists".format(job_id)}
assert job_response.status_code == 409
assert response_to_json(job_response) == error_message
assert jobs.get_by_id(job_id) == job_existing
def test_post_with_none_returns_error_with_400_status(self):
    """POSTing a JSON 'null' body is rejected with HTTP 400."""
    repo = JobRepositoryMemory()
    api = test_client(repo)
    resp = api.post("/api/job", data=json.dumps(None),
                    content_type='application/json')
    assert resp.status_code == 400
    assert response_to_json(resp) == {
        "message": "Message body could not be parsed as JSON"}
    # Nothing may have been stored by the failed request.
    assert len(repo._jobs) == 0
def test_post_with_nonjson_body_returns_error_with_400_status(self):
    """POSTing a syntactically invalid JSON body is rejected with HTTP 400."""
    repo = JobRepositoryMemory()
    api = test_client(repo)
    # Deliberately omit content_type='application/json': with it set, the
    # framework rejects the malformed JSON before our handler ever runs.
    resp = api.post("/api/job", data="{key-with-no-value}")
    assert resp.status_code == 400
    assert response_to_json(resp) == {
        "message": "Message body could not be parsed as JSON"}
def test_post_with_invalid_job_json_returns_error_with_400_status(self):
    """POSTing valid JSON that is not a Job document is rejected with 400."""
    repo = JobRepositoryMemory()
    api = test_client(repo)
    not_a_job = {"no-id-field": "valid-json"}
    resp = api.post("/api/job", data=json.dumps(not_a_job),
                    content_type='application/json')
    assert resp.status_code == 400
    assert response_to_json(resp) == {
        "message": "Message body is not valid Job JSON"}
| 42.572941
| 79
| 0.581618
| 4,109
| 36,187
| 4.848382
| 0.058165
| 0.044423
| 0.017569
| 0.041161
| 0.914717
| 0.908594
| 0.894288
| 0.880032
| 0.871348
| 0.864522
| 0
| 0.05089
| 0.298422
| 36,187
| 849
| 80
| 42.623086
| 0.733811
| 0.04521
| 0
| 0.740854
| 0
| 0
| 0.223095
| 0.07849
| 0
| 0
| 0
| 0.001178
| 0.137195
| 1
| 0.068598
| false
| 0
| 0.012195
| 0.001524
| 0.096037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d0bc6c892e56244480b67d43b8246335f0520f6e
| 41,441
|
py
|
Python
|
ross/tests/test_rubbing.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 69
|
2018-12-26T19:21:26.000Z
|
2022-02-10T08:48:03.000Z
|
ross/tests/test_rubbing.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 639
|
2018-12-18T16:44:11.000Z
|
2022-03-27T16:46:41.000Z
|
ross/tests/test_rubbing.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 136
|
2019-01-08T12:37:32.000Z
|
2022-03-30T07:14:35.000Z
|
import os
from pathlib import Path
from tempfile import tempdir
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
import ross as rs
from ross.defects.misalignment import MisalignmentFlex
from ross.units import Q_
# ---------------------------------------------------------------------------
# Shared module-level fixture data: a 6-DoF rotor model with internal
# damping, two disks and two bearings, used by every rubbing test below.
# ---------------------------------------------------------------------------
steel2 = rs.Material(name="Steel", rho=7850, E=2.17e11, Poisson=0.2992610837438423)
# Rotor with 6 DoFs, with internal damping, 2 disks and 2 bearings.
# NOTE(review): the 34 station positions below produce 33 shaft elements.
i_d = 0  # shaft inner diameter (0 -> solid shaft)
o_d = 0.019  # shaft outer diameter
n = 33  # number of shaft elements (informational; not referenced below)
# fmt: off
# Absolute axial station positions, scaled by 1/1000 (presumably mm -> m).
L = np.array(
    [0 , 25, 64, 104, 124, 143, 175, 207, 239, 271,
     303, 335, 345, 355, 380, 408, 436, 466, 496, 526,
     556, 586, 614, 647, 657, 667, 702, 737, 772, 807,
     842, 862, 881, 914]
)/ 1000
# fmt: on
# Turn absolute positions into per-element lengths (first differences).
L = [L[i] - L[i - 1] for i in range(1, len(L))]
# One 6-DoF shaft element per segment, uniform cross-section.
shaft_elem = [
    rs.ShaftElement6DoF(
        material=steel2,
        L=l,
        idl=i_d,
        odl=o_d,
        idr=i_d,
        odr=o_d,
        alpha=8.0501,  # internal damping coefficients (see module comment)
        beta=1.0e-5,
        rotary_inertia=True,
        shear_effects=True,
    )
    for l in L
]
# Disk inertias: Id = diametral, Ip = polar.
Id = 0.003844540885417
Ip = 0.007513248437500
disk0 = rs.DiskElement6DoF(n=12, m=2.6375, Id=Id, Ip=Ip)
disk1 = rs.DiskElement6DoF(n=24, m=2.6375, Id=Id, Ip=Ip)
# Bearing stiffness (k**) and damping (c**) coefficients; axial (zz) terms
# are zero for both bearings.
kxx1 = 4.40e5
kyy1 = 4.6114e5
kzz = 0
cxx1 = 27.4
cyy1 = 2.505
czz = 0
kxx2 = 9.50e5
kyy2 = 1.09e8
cxx2 = 50.4
cyy2 = 100.4553
bearing0 = rs.BearingElement6DoF(
    n=4, kxx=kxx1, kyy=kyy1, cxx=cxx1, cyy=cyy1, kzz=kzz, czz=czz
)
bearing1 = rs.BearingElement6DoF(
    n=31, kxx=kxx2, kyy=kyy2, cxx=cxx2, cyy=cyy2, kzz=kzz, czz=czz
)
# The assembled rotor shared by all fixtures/tests in this module.
rotor = rs.Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
@pytest.fixture
def rub():
    """Rubbing response of the shared rotor, driven with plain-float inputs."""
    magnitudes = np.array([5e-4, 0])
    phases = np.array([-np.pi / 2, 0])
    run_kwargs = dict(
        dt=0.001,
        tI=0,
        tF=0.5,
        deltaRUB=7.95e-5,
        kRUB=1.1e6,
        cRUB=40,
        miRUB=0.3,
        posRUB=12,
        speed=125.66370614359172,
        unbalance_magnitude=magnitudes,
        unbalance_phase=phases,
        print_progress=True,
    )
    return rotor.run_rubbing(**run_kwargs)
@pytest.fixture
def rub_units():
    """Rubbing response of the shared rotor, driven with Q_ quantity inputs."""
    magnitudes = Q_(np.array([0.043398083107259365, 0]), "lb*in")
    phases = Q_(np.array([-90.0, 0.0]), "degrees")
    run_kwargs = dict(
        dt=0.001,
        tI=0,
        tF=0.5,
        deltaRUB=7.95e-5,
        kRUB=1.1e6,
        cRUB=40,
        miRUB=0.3,
        posRUB=12,
        speed=Q_(1200, "RPM"),
        unbalance_magnitude=magnitudes,
        unbalance_phase=phases,
        print_progress=True,
    )
    return rotor.run_rubbing(**run_kwargs)
def test_rub_parameters(rub):
    """The rubbing object must echo back every parameter it was built with."""
    expected = {
        "dt": 0.001,
        "tI": 0,
        "tF": 0.5,
        "deltaRUB": 7.95e-5,
        "kRUB": 1.1e6,
        "cRUB": 40,
        "miRUB": 0.3,
        "posRUB": 12,
        "speed": 125.66370614359172,
    }
    for attr, value in expected.items():
        assert getattr(rub, attr) == value
def test_rub_parameters_units(rub_units):
    """Q_-quantity inputs must be normalized to the same SI parameter values."""
    expected = {
        "dt": 0.001,
        "tI": 0,
        "tF": 0.5,
        "deltaRUB": 7.95e-5,
        "kRUB": 1.1e6,
        "cRUB": 40,
        "miRUB": 0.3,
        "posRUB": 12,
        "speed": 125.66370614359172,
    }
    for attr, value in expected.items():
        assert getattr(rub_units, attr) == value
def test_rub_forces(rub):
    """Regression-check the rubbing force history at the rub node.

    ``forces_rub`` is indexed by global DoF; with 6 DoFs per node, row
    ``posRUB * 6`` holds the x-direction force history at the rubbing node
    and row ``posRUB * 6 + 1`` the y-direction one (TODO confirm DoF
    ordering against ross' 6-DoF model docs).  The expected vectors are
    golden values captured from a previous run — do not edit by hand.
    """
    # x-direction contact force over all time steps.
    assert rub.forces_rub[rub.posRUB * 6, :] == pytest.approx(
        # fmt: off
        np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            1.34106430e+00, 2.38612745e+00, 2.49817920e+00, 1.81347173e+00,
            5.98445473e-01, -1.62547004e+00, -4.23917250e+00, -6.00118806e+00,
            -6.20373067e+00, -4.73612273e+00, -2.73184699e+00, -2.58971885e+00,
            -6.09874812e+00, -1.19048343e+01, -1.62930120e+01, -1.64120039e+01,
            -1.22951018e+01, -6.47561269e+00, -1.92419235e+00, 3.53374623e-02,
            1.47810258e-01, 6.34772882e-02, 1.83806641e-01, 4.43954908e-01,
            5.92533172e-01, 5.51738633e-01, 6.18865825e-01, 1.16291085e+00,
            1.99527847e+00, 2.28657481e+00, 1.21810729e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.55680614e-01,
            -1.15087399e+00, -1.31570879e+00, -5.73686616e-01, 1.75075759e-01,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            1.35857558e+00, 4.63547122e+00, 8.26625410e+00, 1.07680988e+01,
            1.13286366e+01, 9.96961037e+00, 7.44128958e+00, 4.81928761e+00,
            2.91576005e+00, 1.93968467e+00, 1.61560462e+00, 1.51666598e+00,
            1.30530274e+00, 8.25200469e-01, 1.14260074e-01, -1.19244337e+00,
            -2.29090277e+00, -2.84242534e+00, -2.70568998e+00, -1.71797718e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            -3.62954221e-01, -2.15664186e-02, -5.82573814e-01, -1.27145991e+00,
            -1.38207772e+00, -8.36333007e-01, 0.00000000e+00, -1.52638197e+00,
            -3.88459875e+00, -5.84147533e+00, -5.55533169e+00, -2.84156510e+00,
            0.00000000e+00, 0.00000000e+00, -9.98460245e-01, -5.03924706e+00,
            -7.75632048e+00, -7.28589841e+00, -4.21050757e+00, -6.78335856e-01,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            4.97669123e-01, 9.43108661e-01, 1.21217301e+00, 1.02059004e+00,
            5.88846364e-01, 4.25532247e-01, 7.49102363e-01, 1.21248611e+00,
            1.23719949e+00, 5.26714086e-01, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 8.30579340e-01, 2.26273248e+00,
            2.80172735e+00, 2.36060364e+00, 1.47121281e+00, 9.05484679e-01,
            1.10607178e+00, 1.80305085e+00, 2.28352354e+00, 2.03304519e+00,
            1.10298146e+00, 3.77694650e-02, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -3.22380450e+00,
            -5.68021237e+00, -5.83462167e+00, -3.25195544e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -9.90196556e-01,
            -2.90885128e+00, -2.81169132e+00, -8.81117993e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 2.69024403e-01, 3.11412852e-01, 5.58798236e-01,
            6.40371175e-01, 3.49164726e-01, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.36971006e-01,
            1.21346945e+00, 2.18249083e+00, 2.84208082e+00, 2.99156571e+00,
            2.55421246e+00, 1.59111859e+00, 3.14246711e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            -9.16343507e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, -1.16296507e+00, -3.23286551e+00,
            -4.51354090e+00, -3.98257895e+00, -1.81166144e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 2.12219345e-01, 2.21171647e-01,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, -9.56292801e-01, -1.33271793e+00,
            -1.03821500e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00])
        # fmt: on
    )
    # y-direction contact force over all time steps.
    assert rub.forces_rub[rub.posRUB * 6 + 1, :] == pytest.approx(
        # fmt: off
        np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            -2.04165157e+00, -4.37810280e+00, -6.57666995e+00, -8.28694141e+00,
            -9.37791126e+00, -9.76081035e+00, -9.25783043e+00, -7.68973505e+00,
            -5.19539491e+00, -2.48084518e+00, -6.09414243e-01, -1.40464287e-01,
            -2.91868675e-01, 5.94864394e-01, 3.76903307e+00, 7.17867399e+00,
            8.50615176e+00, 6.92755001e+00, 3.76919148e+00, 1.38886654e+00,
            1.14793265e+00, 2.47476310e+00, 3.69518317e+00, 3.56151341e+00,
            2.15845164e+00, 7.22745269e-01, 4.64776971e-01, 1.32361970e+00,
            2.08542187e+00, 1.74043906e+00, 3.68327123e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.40526270e+00,
            3.91183574e+00, 6.08879987e+00, 6.05701802e+00, 3.35127909e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            1.33684009e+00, 4.74117308e+00, 5.66393572e+00, 3.75144548e+00,
            2.83459537e-01, -1.61445732e+00, -2.61883874e+00, -2.76818696e+00,
            -2.47679180e+00, -2.29014373e+00, -2.50794047e+00, -3.08754938e+00,
            -3.77455164e+00, -4.26560645e+00, -4.33448168e+00, -3.93383052e+00,
            -3.19736872e+00, -2.30738448e+00, -1.36778475e+00, -4.13019001e-01,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            -2.30731635e-01, -2.71055349e+00, -3.77777456e+00, -3.19820518e+00,
            -1.62052825e+00, -1.57786580e-01, 0.00000000e+00, -3.39194075e-01,
            -1.19486567e+00, -1.40982868e+00, -8.24731765e-01, 1.77245380e-02,
            0.00000000e+00, 0.00000000e+00, 1.12393272e+00, 3.42883960e+00,
            6.44392419e+00, 7.74026022e+00, 6.02300041e+00, 2.15021680e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            1.31206244e+00, 2.88224433e+00, 2.70442915e+00, 1.31540273e+00,
            6.83579157e-02, -1.44657955e-01, -9.74069771e-04, 2.01609711e-01,
            7.58795510e-02, -1.14712672e-01, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, -1.16918168e-01, -9.48748823e-02,
            -3.74701248e-01, -6.68192840e-01, -7.77720853e-01, -8.15718667e-01,
            -1.19404470e+00, -2.17926682e+00, -3.40215785e+00, -3.99418641e+00,
            -3.25449483e+00, -1.22032570e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.72304245e+00,
            -2.63776333e+00, -2.11110151e+00, -7.22241992e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37988828e-01,
            -1.36497601e-01, 1.32278945e-01, 5.15571508e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 8.42548433e-01, 2.72091640e+00, 3.24542027e+00,
            2.03399132e+00, 3.77699186e-02, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.99799699e-01,
            5.92260640e-02, -1.02272668e-02, -3.19939917e-01, -7.49813663e-01,
            -1.06945353e+00, -1.04483902e+00, -5.36963585e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            -3.23192576e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 1.42596962e-01, -1.71901114e-01,
            -7.45893810e-02, 3.71940362e-01, 6.24286373e-01, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, -9.16154067e-02, -8.72890060e-02,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 8.36067934e-01, 1.13854180e+00,
            4.79721529e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
            0.00000000e+00])
        # fmt: on
    )
def test_rub_forces_units(rub_units):
assert rub_units.forces_rub[rub_units.posRUB * 6, :] == pytest.approx(
# fmt: off
np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.34106430e+00, 2.38612745e+00, 2.49817920e+00, 1.81347173e+00,
5.98445473e-01, -1.62547004e+00, -4.23917250e+00, -6.00118806e+00,
-6.20373067e+00, -4.73612273e+00, -2.73184699e+00, -2.58971885e+00,
-6.09874812e+00, -1.19048343e+01, -1.62930120e+01, -1.64120039e+01,
-1.22951018e+01, -6.47561269e+00, -1.92419235e+00, 3.53374623e-02,
1.47810258e-01, 6.34772882e-02, 1.83806641e-01, 4.43954908e-01,
5.92533172e-01, 5.51738633e-01, 6.18865825e-01, 1.16291085e+00,
1.99527847e+00, 2.28657481e+00, 1.21810729e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.55680614e-01,
-1.15087399e+00, -1.31570879e+00, -5.73686616e-01, 1.75075759e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.35857558e+00, 4.63547122e+00, 8.26625410e+00, 1.07680988e+01,
1.13286366e+01, 9.96961037e+00, 7.44128958e+00, 4.81928761e+00,
2.91576005e+00, 1.93968467e+00, 1.61560462e+00, 1.51666598e+00,
1.30530274e+00, 8.25200469e-01, 1.14260074e-01, -1.19244337e+00,
-2.29090277e+00, -2.84242534e+00, -2.70568998e+00, -1.71797718e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-3.62954221e-01, -2.15664186e-02, -5.82573814e-01, -1.27145991e+00,
-1.38207772e+00, -8.36333007e-01, 0.00000000e+00, -1.52638197e+00,
-3.88459875e+00, -5.84147533e+00, -5.55533169e+00, -2.84156510e+00,
0.00000000e+00, 0.00000000e+00, -9.98460245e-01, -5.03924706e+00,
-7.75632048e+00, -7.28589841e+00, -4.21050757e+00, -6.78335856e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
4.97669123e-01, 9.43108661e-01, 1.21217301e+00, 1.02059004e+00,
5.88846364e-01, 4.25532247e-01, 7.49102363e-01, 1.21248611e+00,
1.23719949e+00, 5.26714086e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 8.30579340e-01, 2.26273248e+00,
2.80172735e+00, 2.36060364e+00, 1.47121281e+00, 9.05484679e-01,
1.10607178e+00, 1.80305085e+00, 2.28352354e+00, 2.03304519e+00,
1.10298146e+00, 3.77694650e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -3.22380450e+00,
-5.68021237e+00, -5.83462167e+00, -3.25195544e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -9.90196556e-01,
-2.90885128e+00, -2.81169132e+00, -8.81117993e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 2.69024403e-01, 3.11412852e-01, 5.58798236e-01,
6.40371175e-01, 3.49164726e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.36971006e-01,
1.21346945e+00, 2.18249083e+00, 2.84208082e+00, 2.99156571e+00,
2.55421246e+00, 1.59111859e+00, 3.14246711e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-9.16343507e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -1.16296507e+00, -3.23286551e+00,
-4.51354090e+00, -3.98257895e+00, -1.81166144e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 2.12219345e-01, 2.21171647e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -9.56292801e-01, -1.33271793e+00,
-1.03821500e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00])
# fmt: on
)
assert rub_units.forces_rub[rub_units.posRUB * 6 + 1, :] == pytest.approx(
# fmt: off
np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.04165157e+00, -4.37810280e+00, -6.57666995e+00, -8.28694141e+00,
-9.37791126e+00, -9.76081035e+00, -9.25783043e+00, -7.68973505e+00,
-5.19539491e+00, -2.48084518e+00, -6.09414243e-01, -1.40464287e-01,
-2.91868675e-01, 5.94864394e-01, 3.76903307e+00, 7.17867399e+00,
8.50615176e+00, 6.92755001e+00, 3.76919148e+00, 1.38886654e+00,
1.14793265e+00, 2.47476310e+00, 3.69518317e+00, 3.56151341e+00,
2.15845164e+00, 7.22745269e-01, 4.64776971e-01, 1.32361970e+00,
2.08542187e+00, 1.74043906e+00, 3.68327123e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.40526270e+00,
3.91183574e+00, 6.08879987e+00, 6.05701802e+00, 3.35127909e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.33684009e+00, 4.74117308e+00, 5.66393572e+00, 3.75144548e+00,
2.83459537e-01, -1.61445732e+00, -2.61883874e+00, -2.76818696e+00,
-2.47679180e+00, -2.29014373e+00, -2.50794047e+00, -3.08754938e+00,
-3.77455164e+00, -4.26560645e+00, -4.33448168e+00, -3.93383052e+00,
-3.19736872e+00, -2.30738448e+00, -1.36778475e+00, -4.13019001e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.30731635e-01, -2.71055349e+00, -3.77777456e+00, -3.19820518e+00,
-1.62052825e+00, -1.57786580e-01, 0.00000000e+00, -3.39194075e-01,
-1.19486567e+00, -1.40982868e+00, -8.24731765e-01, 1.77245380e-02,
0.00000000e+00, 0.00000000e+00, 1.12393272e+00, 3.42883960e+00,
6.44392419e+00, 7.74026022e+00, 6.02300041e+00, 2.15021680e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.31206244e+00, 2.88224433e+00, 2.70442915e+00, 1.31540273e+00,
6.83579157e-02, -1.44657955e-01, -9.74069771e-04, 2.01609711e-01,
7.58795510e-02, -1.14712672e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -1.16918168e-01, -9.48748823e-02,
-3.74701248e-01, -6.68192840e-01, -7.77720853e-01, -8.15718667e-01,
-1.19404470e+00, -2.17926682e+00, -3.40215785e+00, -3.99418641e+00,
-3.25449483e+00, -1.22032570e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.72304245e+00,
-2.63776333e+00, -2.11110151e+00, -7.22241992e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37988828e-01,
-1.36497601e-01, 1.32278945e-01, 5.15571508e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 8.42548433e-01, 2.72091640e+00, 3.24542027e+00,
2.03399132e+00, 3.77699186e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.99799699e-01,
5.92260640e-02, -1.02272668e-02, -3.19939917e-01, -7.49813663e-01,
-1.06945353e+00, -1.04483902e+00, -5.36963585e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-3.23192576e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 1.42596962e-01, -1.71901114e-01,
-7.45893810e-02, 3.71940362e-01, 6.24286373e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -9.16154067e-02, -8.72890060e-02,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 8.36067934e-01, 1.13854180e+00,
4.79721529e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00])
# fmt: on
)
| 61.668155
| 92
| 0.637171
| 6,597
| 41,441
| 3.993027
| 0.068971
| 0.567914
| 0.681497
| 0.767444
| 0.947384
| 0.942753
| 0.937135
| 0.934819
| 0.934819
| 0.932807
| 0
| 0.67774
| 0.199537
| 41,441
| 671
| 93
| 61.76006
| 0.116363
| 0.004199
| 0
| 0.847134
| 0
| 0
| 0.000485
| 0
| 0
| 0
| 0
| 0
| 0.036624
| 1
| 0.009554
| false
| 0
| 0.014331
| 0
| 0.02707
| 0.003185
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
ef70c70213111c3000552a67883e02a0cf97b098
| 152
|
py
|
Python
|
snip/__main__.py
|
arminfriedl/snip
|
b689ba8634ea192c2b19adbc075e70d451a4f4b3
|
[
"MIT"
] | null | null | null |
snip/__main__.py
|
arminfriedl/snip
|
b689ba8634ea192c2b19adbc075e70d451a4f4b3
|
[
"MIT"
] | null | null | null |
snip/__main__.py
|
arminfriedl/snip
|
b689ba8634ea192c2b19adbc075e70d451a4f4b3
|
[
"MIT"
] | null | null | null |
from . import app
from . import snip_config
if __name__ == "__main__":
app.run(host=snip_config.SNIP_FLASK_HOST, port=snip_config.SNIP_FLASK_PORT)
| 25.333333
| 79
| 0.776316
| 24
| 152
| 4.291667
| 0.5
| 0.291262
| 0.271845
| 0.368932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 152
| 5
| 80
| 30.4
| 0.774436
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ef851f2f7095d06956de6df90e3cfded20236a2d
| 138
|
py
|
Python
|
test/Backend/NoneBackend.py
|
DatabaseStratum/py-stratum-cli
|
e02278386d0c82e08760e2f25c9dadfe94c718b6
|
[
"MIT"
] | 11
|
2015-01-22T11:06:30.000Z
|
2018-12-21T08:21:21.000Z
|
test/Backend/NoneBackend.py
|
DatabaseStratum/py-stratum-cli
|
e02278386d0c82e08760e2f25c9dadfe94c718b6
|
[
"MIT"
] | 1
|
2016-10-19T09:10:40.000Z
|
2016-10-19T09:10:40.000Z
|
test/Backend/NoneBackend.py
|
DatabaseStratum/py-stratum-cli
|
e02278386d0c82e08760e2f25c9dadfe94c718b6
|
[
"MIT"
] | 6
|
2016-02-21T15:19:31.000Z
|
2020-02-23T11:14:07.000Z
|
from pystratum_backend.Backend import Backend
class NoneBackend(Backend):
"""
Backend without implementations.
"""
pass
| 15.333333
| 45
| 0.702899
| 13
| 138
| 7.384615
| 0.692308
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 138
| 8
| 46
| 17.25
| 0.888889
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
efa3200e4c1b4560cab7268c01548cb8b8793342
| 1,099
|
py
|
Python
|
client/Mahjong-GB/test.py
|
chriswang2468/mj
|
918a2414c7e4cde5ad17a33a23c90461aabb87f2
|
[
"Apache-2.0"
] | null | null | null |
client/Mahjong-GB/test.py
|
chriswang2468/mj
|
918a2414c7e4cde5ad17a33a23c90461aabb87f2
|
[
"Apache-2.0"
] | null | null | null |
client/Mahjong-GB/test.py
|
chriswang2468/mj
|
918a2414c7e4cde5ad17a33a23c90461aabb87f2
|
[
"Apache-2.0"
] | null | null | null |
from MahjongGB import MahjongFanCalculator
try:
ans=MahjongFanCalculator((),("W1","W1","W1","W2","W2","W2","W3","W3","W3","W4","W4","W4","W5"),"W5",1,True,False,False,True,0,0)
except Exception as err:
print(err)
else:
print(ans)
try:
ans=MahjongFanCalculator((("GANG","W1",2),),("W2","W2","W2","W3","W3","W3","W4","W4","W4","W5"),"W5",1,False,False,False,False,0,0)
except Exception as err:
print(err)
else:
print(ans)
#错误
try:
ans=MahjongFanCalculator((),("W1","W1","W1","W2","W2","W2","W3","W3","W3","W4","W4","W4"),"W5",1,True,False,False,True,0,0)
except Exception as err:
print(err)
else:
print(ans)
#没和
try:
ans=MahjongFanCalculator((("CHI","W1",0),),("W2","W2","W2","W3","W3","W3","W4","W4","W4","W5"),"W7",1,False,False,False,False,0,0)
except Exception as err:
print(err)
else:
print(ans)
try:
ans=MahjongFanCalculator((("PENG","W9",0),("CHI","T7",2), ("CHI", "W7", 1), ("CHI", "W7", 2)),("T9", ),"T9",0,False,False,False,False,0,0)
except Exception as err:
print(err)
else:
print(ans)
| 28.179487
| 143
| 0.567789
| 170
| 1,099
| 3.670588
| 0.182353
| 0.176282
| 0.144231
| 0.136218
| 0.817308
| 0.817308
| 0.817308
| 0.817308
| 0.817308
| 0.817308
| 0
| 0.083156
| 0.146497
| 1,099
| 38
| 144
| 28.921053
| 0.58209
| 0.00364
| 0
| 0.806452
| 0
| 0
| 0.127014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0.322581
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
efd942c84898e89c5e274200a496fc342ea706d3
| 456
|
py
|
Python
|
tsbenchmark/core/loader.py
|
DataCanvasIO/TSBenchmark
|
e8e45cd0cd206723ad59a8cac2a572e48f43a728
|
[
"Apache-2.0"
] | 2
|
2022-03-28T02:10:09.000Z
|
2022-03-29T02:56:26.000Z
|
tsbenchmark/core/loader.py
|
DataCanvasIO/TSBenchmark
|
e8e45cd0cd206723ad59a8cac2a572e48f43a728
|
[
"Apache-2.0"
] | null | null | null |
tsbenchmark/core/loader.py
|
DataCanvasIO/TSBenchmark
|
e8e45cd0cd206723ad59a8cac2a572e48f43a728
|
[
"Apache-2.0"
] | null | null | null |
class DataSetLoader:
def __init__(self):
pass
def list(self, data_size, type):
pass
def exists(self, dataset_id):
pass
def load_meta(self, dataset_id):
pass
def load(self, dataset_id):
pass
class TaskLoader:
def __init__(self):
pass
def list(self, data_size, type):
pass
def exists(self, dataset_id):
pass
def load(self, dataset_id):
pass
| 15.2
| 36
| 0.572368
| 57
| 456
| 4.298246
| 0.280702
| 0.2
| 0.265306
| 0.346939
| 0.84898
| 0.84898
| 0.84898
| 0.84898
| 0.84898
| 0.84898
| 0
| 0
| 0.344298
| 456
| 29
| 37
| 15.724138
| 0.819398
| 0
| 0
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.45
| false
| 0.45
| 0
| 0
| 0.55
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 12
|
4bc6a9a095380babfa08d9bb75d70e047c8ef5a3
| 94
|
py
|
Python
|
carsus/io/output/__init__.py
|
parikshit14/carsus
|
3f67e8068829829361d7b1da9020e1fde9dcac2e
|
[
"BSD-3-Clause"
] | 21
|
2016-06-01T16:12:03.000Z
|
2022-02-04T09:03:38.000Z
|
carsus/io/output/__init__.py
|
parikshit14/carsus
|
3f67e8068829829361d7b1da9020e1fde9dcac2e
|
[
"BSD-3-Clause"
] | 149
|
2016-05-03T17:50:42.000Z
|
2022-03-25T14:48:51.000Z
|
carsus/io/output/__init__.py
|
parikshit14/carsus
|
3f67e8068829829361d7b1da9020e1fde9dcac2e
|
[
"BSD-3-Clause"
] | 34
|
2016-05-03T16:39:11.000Z
|
2022-02-03T16:39:49.000Z
|
from carsus.io.output.tardis_ import AtomData
from carsus.io.output.base import TARDISAtomData
| 47
| 48
| 0.861702
| 14
| 94
| 5.714286
| 0.642857
| 0.25
| 0.3
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 94
| 2
| 48
| 47
| 0.91954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ef232963020f3dcb500abc12fb3e5f9deeb905ee
| 23
|
py
|
Python
|
src/mathlib1/add.py
|
PartehDev/mathlib1
|
65047d1d5c4ecddee4c889786552592be1d1ea94
|
[
"MIT"
] | null | null | null |
src/mathlib1/add.py
|
PartehDev/mathlib1
|
65047d1d5c4ecddee4c889786552592be1d1ea94
|
[
"MIT"
] | null | null | null |
src/mathlib1/add.py
|
PartehDev/mathlib1
|
65047d1d5c4ecddee4c889786552592be1d1ea94
|
[
"MIT"
] | null | null | null |
def add(a,b):return a+b
| 23
| 23
| 0.695652
| 7
| 23
| 2.285714
| 0.714286
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 23
| 1
| 23
| 23
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
3291d375be54358c9df1c7bffc404d1ca663bb1c
| 216
|
py
|
Python
|
tests/test_caller_module.py
|
brunonicko/basicco
|
8cd1d66073df47c6b56be3d431f8e6b9ed6d9fa9
|
[
"MIT"
] | 6
|
2021-11-16T11:26:54.000Z
|
2022-01-24T05:50:22.000Z
|
tests/test_caller_module.py
|
brunonicko/basicco
|
8cd1d66073df47c6b56be3d431f8e6b9ed6d9fa9
|
[
"MIT"
] | null | null | null |
tests/test_caller_module.py
|
brunonicko/basicco
|
8cd1d66073df47c6b56be3d431f8e6b9ed6d9fa9
|
[
"MIT"
] | null | null | null |
import pytest
from basicco import caller_module
def test_caller_module():
def func():
return caller_module.caller_module()
assert func() == __name__
if __name__ == "__main__":
pytest.main()
| 14.4
| 44
| 0.685185
| 26
| 216
| 5.038462
| 0.538462
| 0.366412
| 0.229008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217593
| 216
| 14
| 45
| 15.428571
| 0.775148
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.25
| true
| 0
| 0.25
| 0.125
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
32b941bf04bd246d3c3b4f5ef0b8fc17ee88549c
| 2,301
|
py
|
Python
|
tests/v2/test_1379-reducers-with-axis-None-and-typetracers.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | 2
|
2019-09-12T03:07:23.000Z
|
2019-09-27T05:32:07.000Z
|
tests/v2/test_1379-reducers-with-axis-None-and-typetracers.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | 1
|
2019-09-26T17:57:45.000Z
|
2019-09-26T17:57:45.000Z
|
tests/v2/test_1379-reducers-with-axis-None-and-typetracers.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import awkward as ak # noqa: F401
def test():
x = ak._v2.from_iter([[{"x": 1, "y": 1.1}], [], [{"x": 2, "y": 2.2}]])
x_tt = ak._v2.Array(x.layout.typetracer)
assert str(ak._v2.count(x_tt, flatten_records=True)) == "unknown-int64"
assert str(ak._v2.count_nonzero(x_tt, flatten_records=True)) == "unknown-int64"
assert str(ak._v2.any(x_tt, flatten_records=True)) == "unknown-bool"
assert str(ak._v2.all(x_tt, flatten_records=True)) == "unknown-bool"
assert str(ak._v2.prod(x_tt, flatten_records=True)) == "unknown-float64"
assert str(ak._v2.sum(x_tt, flatten_records=True)) == "unknown-float64"
assert str(ak._v2.max(x_tt, flatten_records=True)) == "maybe-unknown-float64"
assert str(ak._v2.min(x_tt, flatten_records=True)) == "maybe-unknown-float64"
assert str(ak._v2.argmax(x_tt, flatten_records=True)) == "maybe-unknown-int64"
assert str(ak._v2.argmin(x_tt, flatten_records=True)) == "maybe-unknown-int64"
assert str(ak._v2.count(x_tt.x)) == "unknown-int64"
assert str(ak._v2.count_nonzero(x_tt.x)) == "unknown-int64"
assert str(ak._v2.any(x_tt.x)) == "unknown-bool"
assert str(ak._v2.all(x_tt.x)) == "unknown-bool"
assert str(ak._v2.prod(x_tt.x, flatten_records=True)) == "unknown-int64"
assert str(ak._v2.prod(x_tt.y, flatten_records=True)) == "unknown-float64"
assert str(ak._v2.sum(x_tt.x, flatten_records=True)) == "unknown-int64"
assert str(ak._v2.sum(x_tt.y, flatten_records=True)) == "unknown-float64"
assert str(ak._v2.max(x_tt.x)) == "maybe-unknown-int64"
assert str(ak._v2.max(x_tt.y)) == "maybe-unknown-float64"
assert str(ak._v2.min(x_tt.x)) == "maybe-unknown-int64"
assert str(ak._v2.min(x_tt.y)) == "maybe-unknown-float64"
assert str(ak._v2.argmax(x_tt.x)) == "maybe-unknown-int64"
assert str(ak._v2.argmax(x_tt.y)) == "maybe-unknown-int64"
assert str(ak._v2.argmin(x_tt.x)) == "maybe-unknown-int64"
assert str(ak._v2.argmin(x_tt.y)) == "maybe-unknown-int64"
assert str(ak._v2.mean(x_tt, flatten_records=True)) == "unknown-float64"
assert str(ak._v2.mean(x_tt.x)) == "unknown-float64"
assert str(ak._v2.mean(x_tt.y)) == "unknown-float64"
| 54.785714
| 87
| 0.678401
| 389
| 2,301
| 3.809769
| 0.138817
| 0.083671
| 0.21525
| 0.254386
| 0.873819
| 0.873819
| 0.873819
| 0.84278
| 0.84278
| 0.74359
| 0
| 0.047904
| 0.129074
| 2,301
| 41
| 88
| 56.121951
| 0.691617
| 0.046502
| 0
| 0
| 0
| 0
| 0.215068
| 0.038356
| 0
| 0
| 0
| 0
| 0.852941
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
32bc841a1ae90cb8aec304523dd80d96f0118d6b
| 219
|
py
|
Python
|
py_learning_6_30/CheckName.py
|
Mloser-z/Python_Learning
|
c0b663faec17349547c8089fc2a918153b675ae9
|
[
"Apache-2.0"
] | null | null | null |
py_learning_6_30/CheckName.py
|
Mloser-z/Python_Learning
|
c0b663faec17349547c8089fc2a918153b675ae9
|
[
"Apache-2.0"
] | null | null | null |
py_learning_6_30/CheckName.py
|
Mloser-z/Python_Learning
|
c0b663faec17349547c8089fc2a918153b675ae9
|
[
"Apache-2.0"
] | null | null | null |
def get_formatted_name(first, last):
return first.title() + ' ' + last.title()
def get_name():
first = 'janis'
last = 'joplin'
full_name = get_formatted_name(first, last)
return full_name
| 21.9
| 48
| 0.625571
| 28
| 219
| 4.642857
| 0.392857
| 0.207692
| 0.246154
| 0.323077
| 0.476923
| 0.476923
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251142
| 219
| 9
| 49
| 24.333333
| 0.792683
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
32bc89489164310cc376277bbde50adb0ed71718
| 3,445
|
py
|
Python
|
swmm_calibration/helpers/utils.py
|
mmmatthew/swmm_calibration
|
b6bcd1bdb71d25f2095c6863609735de8e3bd6bc
|
[
"MIT"
] | 5
|
2019-10-22T03:56:21.000Z
|
2021-05-23T02:39:14.000Z
|
swmm_calibration/helpers/utils.py
|
mmmatthew/swmm_calibration
|
b6bcd1bdb71d25f2095c6863609735de8e3bd6bc
|
[
"MIT"
] | 2
|
2019-07-12T01:04:09.000Z
|
2021-06-01T23:29:30.000Z
|
swmm_calibration/helpers/utils.py
|
mmmatthew/swmm_calibration
|
b6bcd1bdb71d25f2095c6863609735de8e3bd6bc
|
[
"MIT"
] | 3
|
2019-03-12T15:01:33.000Z
|
2021-12-01T07:35:40.000Z
|
import pandas as pd
from datetime import datetime
import csv
def format_resample(
input_file, # full path to data file to be read
output_file, # path to file where data should be saved
aggregation_frequency=None, # resampling frequency of data
aggregation_type='mean' # aggregation method for data
):
# reads CSV data in pandas format and outputs data in SWMM format
# resamples the data to 1-second frequency
# read data
data = pd.read_csv(input_file,
parse_dates=[0],
index_col=0,
infer_datetime_format=True,
dayfirst=True,
# date_parser=date_parser,
sep=';')
# aggregate per second, interpolate
data = data.resample('S')
data = data.mean()
data = data.resample('S')
data = data.interpolate(method='linear')
data['datetime'] = data.index
data['date'] = data['datetime'].apply(lambda x: x.strftime('%m/%d/%Y'))
data['time'] = data['datetime'].apply(lambda x: x.strftime('%H:%M:%S'))
data.to_csv(output_file,
sep=' ',
columns=['date', 'time', 'value'],
# quoting=3, #csv.QUOTE_NONE,
index=False,
header=False)
def resample(
        input_file,  # full path to data file to be read
        output_file,  # path to file where data should be saved
        aggregation_period='S',  # resampling frequency of data (pandas offset alias)
        aggregation_type='mean'  # aggregation method applied when resampling
):
    """Resample a semicolon-separated, datetime-indexed CSV and write it back
    out in the same pandas-friendly format (``datetime;value`` with header).

    Gaps created by the resampling are filled by linear interpolation.

    Fixes: ``aggregation_type`` was previously accepted but ignored (mean was
    hard-coded); it is now honoured, with the default preserving the old
    behaviour.  The deprecated ``infer_datetime_format`` read_csv keyword was
    dropped.
    """
    # read data; first column is parsed as the (day-first) datetime index
    data = pd.read_csv(input_file,
                       parse_dates=[0],
                       index_col=0,
                       dayfirst=True,
                       sep=';')
    # aggregate to the target frequency, then interpolate the gaps
    data = data.resample(aggregation_period).agg(aggregation_type)
    data = data.resample(aggregation_period).interpolate(method='linear')
    data['datetime'] = data.index
    data.to_csv(output_file,
                sep=';',
                columns=['datetime', 'value'],
                index=False,
                header=True)
def format(
        input_file,  # full path to data file to be read
        output_file,  # path to file where data should be saved
        aggregation_frequency=None,  # unused; kept for signature compatibility
        aggregation_type='mean'  # unused; kept for signature compatibility
):
    """Convert a pandas-style CSV (semicolon separated, datetime index) to
    SWMM time-series format (``date time value``, space separated, no header)
    without any resampling.

    NOTE: the two ``aggregation_*`` parameters are accepted only for
    signature compatibility with ``format_resample`` and are ignored.
    NOTE: the function name shadows the ``format`` builtin in this module.
    The deprecated ``infer_datetime_format`` read_csv keyword was dropped.
    """
    # read data; first column is parsed as the (day-first) datetime index
    data = pd.read_csv(input_file,
                       parse_dates=[0],
                       index_col=0,
                       dayfirst=True,
                       sep=';')
    data['datetime'] = data.index
    data['date'] = data['datetime'].apply(lambda x: x.strftime('%m/%d/%Y'))
    data['time'] = data['datetime'].apply(lambda x: x.strftime('%H:%M:%S'))
    data.to_csv(output_file,
                sep=' ',
                columns=['date', 'time', 'value'],
                index=False,
                header=False)
| 34.79798
| 77
| 0.542816
| 387
| 3,445
| 4.70801
| 0.193798
| 0.048299
| 0.035126
| 0.050494
| 0.902854
| 0.902854
| 0.884193
| 0.838639
| 0.838639
| 0.816136
| 0
| 0.004486
| 0.352975
| 3,445
| 98
| 78
| 35.153061
| 0.812921
| 0.250218
| 0
| 0.855072
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3ee9f7ca6d29d803d5c1f96c414b73a6c77e5c36
| 168
|
py
|
Python
|
auth-center/App/api/user_source/__init__.py
|
Basic-Components/auth-center
|
bf03922be37161108426712465719f5a3f165834
|
[
"MIT"
] | 1
|
2021-08-03T09:02:26.000Z
|
2021-08-03T09:02:26.000Z
|
auth-center/App/api/user_source/__init__.py
|
Basic-Components/auth-center
|
bf03922be37161108426712465719f5a3f165834
|
[
"MIT"
] | null | null | null |
auth-center/App/api/user_source/__init__.py
|
Basic-Components/auth-center
|
bf03922be37161108426712465719f5a3f165834
|
[
"MIT"
] | 1
|
2018-01-15T14:28:46.000Z
|
2018-01-15T14:28:46.000Z
|
from .user_list import UserListSource
from .user_roles import UserRoleSource
from .user_password import UserPasswordSource
from .user_main_email import UserEmailSource
| 33.6
| 45
| 0.880952
| 21
| 168
| 6.809524
| 0.571429
| 0.223776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 168
| 4
| 46
| 42
| 0.940789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
41019bacd3247527326f33c61636e81912648266
| 118
|
py
|
Python
|
train_clf/__init__.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 5
|
2020-02-10T04:38:40.000Z
|
2021-09-01T18:50:18.000Z
|
train_clf/__init__.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 1
|
2020-06-11T18:26:38.000Z
|
2020-06-11T18:26:38.000Z
|
train_clf/__init__.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 3
|
2019-06-24T12:30:12.000Z
|
2020-02-10T04:39:59.000Z
|
import train_clf.train_svm as train_svm
import train_clf.train_knn as train_knn
import train_clf.train_rf as train_rf
| 29.5
| 39
| 0.872881
| 24
| 118
| 3.916667
| 0.291667
| 0.351064
| 0.446809
| 0.606383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 118
| 3
| 40
| 39.333333
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4106c47b8c30b1131d7b048fc142e9f842193ec4
| 25,328
|
py
|
Python
|
app/forms.py
|
stolau/oty_ilmo
|
61764d3a352c5f0325d4baec983f311ba5037e04
|
[
"MIT"
] | null | null | null |
app/forms.py
|
stolau/oty_ilmo
|
61764d3a352c5f0325d4baec983f311ba5037e04
|
[
"MIT"
] | null | null | null |
app/forms.py
|
stolau/oty_ilmo
|
61764d3a352c5f0325d4baec983f311ba5037e04
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, SubmitField, RadioField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Email, Optional, length, Required, InputRequired, Optional
class RequiredIf(InputRequired):
    """Make a field required whenever another field holds a truthy value.

    Acts like ``InputRequired`` when ``form[other_field_name].data`` is
    truthy, and like ``Optional`` otherwise.

    Sources:
    - http://wtforms.simplecodes.com/docs/1.0.1/validators.html
    - http://stackoverflow.com/questions/8463209/how-to-make-a-field-conditionally-optional-in-wtforms
    - https://www.reddit.com/r/flask/comments/7y1k6p/af_wtforms_required_if_validator/
    """
    field_flags = ('requiredif',)

    def __init__(self, other_field_name, message=None, *args, **kwargs):
        self.other_field_name = other_field_name
        self.message = message

    def __call__(self, form, field):
        trigger = form[self.other_field_name]
        if trigger is None:
            raise Exception('no field named "%s" in form' % self.other_field_name)
        if bool(trigger.data):
            # the controlling field is set: enforce the InputRequired check
            super(RequiredIf, self).__call__(form, field)
        else:
            # otherwise treat this field as optional
            Optional().__call__(form, field)
class RequiredIfValue(InputRequired):
    """Make a field required when another field equals a specific value.

    Acts like ``InputRequired`` when ``form[other_field_name].data`` compares
    equal to ``value``, and like ``Optional`` otherwise.

    Sources:
    - http://wtforms.simplecodes.com/docs/1.0.1/validators.html
    - http://stackoverflow.com/questions/8463209/how-to-make-a-field-conditionally-optional-in-wtforms
    - https://www.reddit.com/r/flask/comments/7y1k6p/af_wtforms_required_if_validator/
    """
    field_flags = ('requiredif',)

    def __init__(self, other_field_name, value, message=None, *args, **kwargs):
        self.other_field_name = other_field_name
        self.message = message
        self.value = value

    def __call__(self, form, field):
        trigger = form[self.other_field_name]
        if trigger is None:
            raise Exception('no field named "%s" in form' % self.other_field_name)
        if bool(trigger.data == self.value):
            # the controlling field matches the trigger value: require input
            super(RequiredIfValue, self).__call__(form, field)
        else:
            Optional().__call__(form, field)
class pubivisaForm(FlaskForm):
    """Signup form for the 'Pubivisa' (pub quiz) event: team name, three
    required members (name, phone, email, guild) plus one optional fourth
    member, and consent checkboxes."""
    teamname = StringField('Joukkueen nimi *', validators=[DataRequired(), length(max=100)])
    # members 0-2 are mandatory
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta0 = SelectField('Kilta *',
                         choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                  ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']),
                         validators=[DataRequired()])
    etunimi1 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi1 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone1 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email1 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta1 = SelectField('Kilta *',
                         choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                  ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']),
                         validators=[DataRequired()])
    etunimi2 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi2 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone2 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email2 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta2 = SelectField('Kilta *',
                         choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                  ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']),
                         validators=[DataRequired()])
    # member 3 is optional: no DataRequired/Email validators
    etunimi3 = StringField('Etunimi', validators=[length(max=50)])
    sukunimi3 = StringField('Sukunimi', validators=[length(max=50)])
    phone3 = StringField('Puhelinnumero', validators=[length(max=20)])
    email3 = StringField('Sähköposti', validators=[length(max=100)])
    kilta3 = SelectField('Kilta',
                         choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                  ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']))
    # consents: publish team name (optional) / privacy policy / binding signup
    consent0 = BooleanField('Sallin joukkueen nimen julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class korttijalautapeliiltaForm(FlaskForm):
    """Signup form for the card-and-board-game evening: one attendee with
    contact details, guild selection and consent checkboxes."""
    etunimi = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    # NOTE(review): label says '*' but no DataRequired validator is attached
    kilta = SelectField('Kilta *',
                        choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                 ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']))
    consent0 = BooleanField('Sallin nimeni julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class fuksilauluiltaForm(FlaskForm):
    """Signup form for the freshman singing evening: name, email and the
    privacy-policy consent only."""
    etunimi = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    email = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class slumberpartyForm(FlaskForm):
    """Signup form for the slumber party event: one attendee with contact
    details, guild selection and consent checkboxes."""
    etunimi = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    # NOTE(review): label says '*' but no DataRequired validator is attached
    kilta = SelectField('Kilta *',
                        choices=(['OTiT', 'OTiT'], ['SIK', 'SIK'], ['YMP', 'YMP'], ['KONE', 'KONE'],
                                 ['PROSE', 'PROSE'], ['OPTIEM', 'OPTIEM'], ['ARK', 'ARK']))
    consent0 = BooleanField('Sallin nimeni julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class pakohuoneForm(FlaskForm):
    """Signup form for the escape-room event: a time slot (18:00 or 19:30),
    a room choice that is required only for the chosen slot (via
    RequiredIfValue), five required team members and one optional sixth."""
    aika = RadioField('Aika *',
                      choices=(['18:00', '18:00'], ['19:30', '19:30']),
                      validators=[DataRequired()])
    # room for the 18:00 slot; required only when aika == '18:00'
    huone1800 = RadioField('Huone (18:00) *',
                           choices=(['Pommi (Uusikatu)', ''],
                                    ['Kuolleen miehen saari (Uusikatu)', ''],
                                    ['Temppelin kirous (Uusikatu)', ''],
                                    ['Velhon perintö (Uusikatu)', ''],
                                    ['Murhamysteeri (Kajaaninkatu)', ''],
                                    ['Vankilapako (Kajaaninkatu)', ''],
                                    ['Professorin arvoitus (Kajaaninkatu)', ''],
                                    ['The SAW (Kirkkokatu)', ''],
                                    ['Alcatraz (Kirkkokatu)', ''],
                                    ['Matka maailman ympäri (Kirkkokatu)', ''],
                                    ['', '']),
                           validators=[RequiredIfValue(other_field_name='aika', value='18:00')],
                           default=(['', '']))
    # room for the 19:30 slot; required only when aika == '19:30'
    huone1930 = RadioField('Huone (19:30) *',
                           choices=(['Pommi (Uusikatu)', ''],
                                    ['Kuolleen miehen saari (Uusikatu)', ''],
                                    ['Temppelin kirous (Uusikatu)', ''],
                                    ['Velhon perintö (Uusikatu)', ''],
                                    ['Murhamysteeri (Kajaaninkatu)', ''],
                                    ['Vankilapako (Kajaaninkatu)', ''],
                                    ['Professorin arvoitus (Kajaaninkatu)', ''],
                                    ['The SAW (Kirkkokatu)', ''],
                                    ['Alcatraz (Kirkkokatu)', ''],
                                    ['Matka maailman ympäri (Kirkkokatu)', ''],
                                    ['', '']),
                           validators=[RequiredIfValue(other_field_name='aika', value='19:30')],
                           default=(['', '']))
    # member 0 provides contact details; members 1-4 names only; member 5 optional
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    etunimi1 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi1 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    etunimi2 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi2 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    etunimi3 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi3 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    etunimi4 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi4 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    etunimi5 = StringField('Etunimi', validators=[length(max=50)])
    sukunimi5 = StringField('Sukunimi', validators=[length(max=50)])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class kysely_arvonta_juttuForm(FlaskForm):
    """Entry form for a survey/raffle: name, email and privacy consent."""
    etunimi = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    email = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön *', validators=[DataRequired()])
    submit = SubmitField('Submit')
class ots2021Form(FlaskForm):
    """Signup form for OTS 2021: name, email, optional guild selection
    (full guild names as display values) and consents."""
    etunimi = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    email = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    # NOTE(review): this form uses key 'YMP' for the Ympäristörakentajakilta
    # choice while the later forms in this module use 'YRK' — confirm intended
    kilta = SelectField('Kilta',
                        choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun Tuotantotalousteekkarit ry'],
                                 ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YMP', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']))
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojeni käytön *', validators=[DataRequired()])
    # opt-in for a later OTS sitsit invitation (no validators: optional)
    consent1 = BooleanField('Minulle saa lähettää kutsun OTS sitseille myöhemmin', validators=[])
    submit = SubmitField('Submit')
class fuksisitsitForm(FlaskForm):
    """Signup form for the freshman sitsit: group size (1-6), contact person
    details with guild, a free-text list of other group members, and
    consent checkboxes.

    Created 7.4.2021.
    """
    membercount = SelectField('Jäsenmäärä',
                              choices=(['1', '1'], ['2', '2'], ['3', '3'], ['4', '4'],
                                       ['5', '5'], ['6', '6']),
                              validators=[DataRequired()])
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta0 = SelectField('Kilta *',
                         choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun tuotantotalousteekkarit ry'],
                                  ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YRK', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']),
                         validators=[DataRequired()])
    # other participants arriving with the contact person (free text)
    vapaavalinta0 = StringField('Muut samalla yhteydellä osallistuvat', validators=[length(max=200)])
    consent0 = BooleanField('Sallin nimeni julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class tupsufuksisitsitForm(FlaskForm):
    """Signup form for the second-year ('tupsufuksi') sitsit; identical
    field layout to fuksisitsitForm.

    Created 7.4.2021.
    """
    membercount = SelectField('Jäsenmäärä',
                              choices=(['1', '1'], ['2', '2'], ['3', '3'], ['4', '4'],
                                       ['5', '5'], ['6', '6']),
                              validators=[DataRequired()])
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta0 = SelectField('Kilta *',
                         choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun tuotantotalousteekkarit ry'],
                                  ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YRK', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']),
                         validators=[DataRequired()])
    # other participants arriving with the contact person (free text)
    vapaavalinta0 = StringField('Muut samalla yhteydellä osallistuvat', validators=[length(max=200)])
    consent0 = BooleanField('Sallin nimeni julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class fuksibeerpongForm(FlaskForm):
    """Signup form for the freshman beer pong tournament: team name, a
    two-person team (contact details for player 0 only) and consents.

    Created 7.4.2021.
    """
    joukkue = StringField('Joukkueen nimi *', validators=[DataRequired(), length(max=50)])
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    etunimi1 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi1 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    consent0 = BooleanField('Sallin joukkueen julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class beerpongForm(FlaskForm):
    """Signup form for the beer pong tournament; identical field layout to
    fuksibeerpongForm.

    Created 7.4.2021.
    """
    joukkue = StringField('Joukkueen nimi *', validators=[DataRequired(), length(max=50)])
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    etunimi1 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi1 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    consent0 = BooleanField('Sallin joukkueen julkaisemisen osallistujalistassa')
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    consent2 = BooleanField('Ymmärrän, että ilmoittautuminen on sitova *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class aitiForm(FlaskForm):
    """Signup form for the 'äiti' event: one attendee with name, guild,
    phone, email and the privacy consent.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    kilta0 = SelectField('Kilta *',
                         choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun tuotantotalousteekkarit ry'],
                                  ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YRK', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']),
                         validators=[DataRequired()])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class hvsitsituusiForm(FlaskForm):
    """Signup form for the HV sitsit: attendee contact details plus drink
    selections (beer/cider, wine, spirits) and a diet/allergy field.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    drink0 = SelectField('Mietojuoma *',
                         choices=(['Olut', 'Olut'], ['Siideri', 'Siideri'], ['Alkoholiton', 'Alkoholiton']),
                         validators=[DataRequired()])
    wine0 = SelectField('Viini *',
                        choices=(['Puna', 'Puna'], ['Valko', 'Valko'], ['Alkoholiton', 'Alkoholiton'], ['Ei ollenkaan', 'Ei Ollenkaan']),
                        validators=[DataRequired()])
    viina0 = SelectField('Viinakaato *',
                         choices=(['Alkoholi', 'Alkoholi'], ['Alkoholiton', 'Alkoholiton']),
                         validators=[DataRequired()])
    # diet/allergies, free text
    other0 = StringField('Ruokavalio/Allergiat ', validators=[length(max=150)])
    consent1 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class syysuitotForm(FlaskForm):
    """Signup form for the autumn 'uitot' (dipping) event: attendee contact
    details with guild, an opt-in checkbox for collecting the teekkari cap
    afterwards, and the privacy consent.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta0 = SelectField('Kilta *',
                         choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun tuotantotalousteekkarit ry'],
                                  ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YRK', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']),
                         validators=[DataRequired()])
    lakki0 = BooleanField('Haen teekkarilakin uittojen jälkeen ')
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class prositsitForm(FlaskForm):
    """Signup form for the PROSE sitsit: attendee contact details with
    guild, food choice, allergies, and three drink selections.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=20)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    kilta0 = SelectField('Kilta *',
                         choices=(['ARK', 'Oulun Arkkitehtikilta ry'], ['KONE', 'Oulun Yliopiston Koneinsinöörikilta ry'], ['OLTO', 'Oulun lääketieteen tekniikan opiskelijat ry'], ['OPTIEM', 'OPTIEM - Oulun tuotantotalousteekkarit ry'],
                                  ['OTiT', 'Oulun Tietoteekkarit ry'], ['PROSE', 'Oulun yliopiston Prosessikilta ry'], ['SIK', 'Sähköinsinöörikilta ry'], ['YRK', 'Oulun yliopiston Ympäristörakentajakilta ry'], ['MUU', 'Muu taho']),
                         validators=[DataRequired()])
    food0 = SelectField('Ruoka *',
                        choices=(['Sekasyöjä', 'Sekasyöjä'], ['Vegetaristi', 'Vegetaristi'], ['Vegaani', 'Vegaani']),
                        validators=[DataRequired()])
    allergies0 = StringField('Allergiat ', validators=[length(max=100)])
    drink0 = SelectField('Mieto *',
                         choices=(['Olut', 'Olut'], ['Siideri', 'Siideri'], ['Holiton', 'Holiton']),
                         validators=[DataRequired()])
    drink1 = SelectField('Viini *',
                         choices=(['Punaviini', 'Punaviini'], ['Valkoviini', 'Valkoviini'], ['Holiton', 'holiton']),
                         validators=[DataRequired()])
    drink2 = SelectField('Snapsi *',
                         choices=(['Holillinen', 'Holillinen'], ['Holiton', 'Holiton']),
                         validators=[DataRequired()])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class simailutForm(FlaskForm):
    """Signup form for the 'simailut' event: attendee details plus an
    optional avec whose fields become required only when the 'avec0'
    checkbox is ticked (via RequiredIf).

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    special0 = StringField('Erikoisruokavalio ', validators=[length(max=100)])
    allergies0 = StringField('Allergiat ', validators=[length(max=100)])
    # ticking this reveals the avec fields client-side (JS hook in render_kw)
    avec0 = BooleanField('Avec ', render_kw={'onchange': "slideDownLakki()"})
    etunimi1 = StringField('Etunimi *', validators=[RequiredIf('avec0'), length(max=50)])
    sukunimi1 = StringField('Sukunimi *', validators=[RequiredIf('avec0'), length(max=50)])
    # NOTE(review): unlike email0 this lacks an Email() validator and caps at
    # 50 instead of 100 characters — confirm whether intentional
    email1 = StringField('Sähköposti *', validators=[RequiredIf('avec0'), length(max=50)])
    special1 = StringField('Erikoisruokavalio ', validators=[length(max=100)])
    allergies1 = StringField('Allergiat ', validators=[length(max=100)])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class kotiruokakurssi1Form(FlaskForm):
    """Signup form for home-cooking course #1: contact details, diet,
    allergies and a free-text greeting field.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    # NOTE(review): phone length cap is 50 here vs 20 in the other forms
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=50)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    food0 = StringField('Ruokavalio ', validators=[length(max=100)])
    allergies0 = StringField('Allergiat ', validators=[length(max=100)])
    free0 = StringField('Vapaat terveiset ', validators=[length(max=100)])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
class kotiruokakurssi2Form(FlaskForm):
    """Signup form for home-cooking course #2; identical field layout to
    kotiruokakurssi1Form.

    Created 7.4.2021.
    """
    etunimi0 = StringField('Etunimi *', validators=[DataRequired(), length(max=50)])
    sukunimi0 = StringField('Sukunimi *', validators=[DataRequired(), length(max=50)])
    # NOTE(review): phone length cap is 50 here vs 20 in the other forms
    phone0 = StringField('Puhelinnumero *', validators=[DataRequired(), length(max=50)])
    email0 = StringField('Sähköposti *', validators=[DataRequired(), Email(), length(max=100)])
    food0 = StringField('Ruokavalio ', validators=[length(max=100)])
    allergies0 = StringField('Allergiat ', validators=[length(max=100)])
    free0 = StringField('Vapaat terveiset ', validators=[length(max=100)])
    consent0 = BooleanField('Olen lukenut tietosuojaselosteen ja hyväksyn tietojen käytön tapahtuman järjestämisessä *', validators=[DataRequired()])
    submit = SubmitField('Ilmoittaudu')
| 53.889362
| 223
| 0.672892
| 2,333
| 25,328
| 7.27261
| 0.124303
| 0.173749
| 0.117169
| 0.129722
| 0.887016
| 0.881004
| 0.863146
| 0.828726
| 0.821654
| 0.821182
| 0
| 0.026902
| 0.157573
| 25,328
| 470
| 224
| 53.889362
| 0.76829
| 0.034981
| 0
| 0.7375
| 0
| 0
| 0.298741
| 0.011393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0125
| false
| 0
| 0.009375
| 0
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
411fefadfe092557567e65ccf3b467a42a552033
| 83
|
py
|
Python
|
models/lists.py
|
skamansam/python-web-todo
|
24ce9bd58efdc796c05e52d56b6c5e9befd48393
|
[
"Unlicense"
] | null | null | null |
models/lists.py
|
skamansam/python-web-todo
|
24ce9bd58efdc796c05e52d56b6c5e9befd48393
|
[
"Unlicense"
] | null | null | null |
models/lists.py
|
skamansam/python-web-todo
|
24ce9bd58efdc796c05e52d56b6c5e9befd48393
|
[
"Unlicense"
] | null | null | null |
# import ormodel
from models.ormodel import ORModel
class List(ORModel):
    """Model for a to-do list.

    Empty subclass: all behaviour comes from the project-local ORModel base
    (presumably the ORM machinery — confirm in models/ormodel.py).  NOTE:
    the name shadows the ``list`` builtin only in capitalised form, but it
    does collide with ``typing.List`` if both are imported.
    """
    pass
| 13.833333
| 34
| 0.759036
| 11
| 83
| 5.727273
| 0.636364
| 0.412698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 5
| 35
| 16.6
| 0.926471
| 0.168675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
4124c3fb69b1ccdacd0ee1cd25b88a9545574ffd
| 20,877
|
py
|
Python
|
datahub_client/apis/view_api.py
|
amkimian/mimir_python
|
994c1542437fa6bd1d0e53b0c0c4c8f692575374
|
[
"Apache-2.0"
] | null | null | null |
datahub_client/apis/view_api.py
|
amkimian/mimir_python
|
994c1542437fa6bd1d0e53b0c0c4c8f692575374
|
[
"Apache-2.0"
] | null | null | null |
datahub_client/apis/view_api.py
|
amkimian/mimir_python
|
994c1542437fa6bd1d0e53b0c0c4c8f692575374
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
DataHub API
DataHub API
OpenAPI spec version: 0.0.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ViewApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API wrapper.

    Uses the given ``api_client`` when one is provided (truthy); otherwise
    falls back to the shared client on ``Configuration``, creating it on
    first use.
    """
    config = Configuration()
    if not api_client:
        # lazily create and cache the shared client on the configuration
        if not config.api_client:
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def get_data_set_releases(self, api_key, user_id, dataset, **kwargs):
    """Return releases for a given data set.

    Synchronous by default.  To make an asynchronous HTTP request, pass a
    ``callback`` keyword argument (a function invoked with the response);
    the request thread is then returned instead of the data.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_data_set_releases(api_key, user_id, dataset, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str api_key: The user api key (required)
    :param str user_id: The user id that owns the data set (required)
    :param str dataset: The id of the data set (required)
    :return: list[DataSetRelease]
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # async path: the *_with_http_info variant returns the request thread
        return self.get_data_set_releases_with_http_info(api_key, user_id, dataset, **kwargs)
    result = self.get_data_set_releases_with_http_info(api_key, user_id, dataset, **kwargs)
    return result
def get_data_set_releases_with_http_info(self, api_key, user_id, dataset, **kwargs):
"""
Returns releases for a given data set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_data_set_releases_with_http_info(api_key, user_id, dataset, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param str user_id: The user id that owns the data set (required)
:param str dataset: The id of the data set (required)
:return: list[DataSetRelease]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key', 'user_id', 'dataset']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_data_set_releases" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key' is set
if ('api_key' not in params) or (params['api_key'] is None):
raise ValueError("Missing the required parameter `api_key` when calling `get_data_set_releases`")
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_data_set_releases`")
# verify the required parameter 'dataset' is set
if ('dataset' not in params) or (params['dataset'] is None):
raise ValueError("Missing the required parameter `dataset` when calling `get_data_set_releases`")
resource_path = '/view/releases/{userId}/{dataset}'.replace('{format}', 'json')
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
if 'dataset' in params:
path_params['dataset'] = params['dataset']
query_params = {}
header_params = {}
if 'api_key' in params:
header_params['api_key'] = params['api_key']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DataSetRelease]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_release_elements(self, api_key, user_id, dataset, release, **kwargs):
"""
Returns the element information for a given release
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_release_elements(api_key, user_id, dataset, release, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param str user_id: (required)
:param str dataset: (required)
:param str release: (required)
:return: list[DataElement]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_release_elements_with_http_info(api_key, user_id, dataset, release, **kwargs)
else:
(data) = self.get_release_elements_with_http_info(api_key, user_id, dataset, release, **kwargs)
return data
def get_release_elements_with_http_info(self, api_key, user_id, dataset, release, **kwargs):
"""
Returns the element information for a given release
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_release_elements_with_http_info(api_key, user_id, dataset, release, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param str user_id: (required)
:param str dataset: (required)
:param str release: (required)
:return: list[DataElement]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key', 'user_id', 'dataset', 'release']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_release_elements" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key' is set
if ('api_key' not in params) or (params['api_key'] is None):
raise ValueError("Missing the required parameter `api_key` when calling `get_release_elements`")
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_release_elements`")
# verify the required parameter 'dataset' is set
if ('dataset' not in params) or (params['dataset'] is None):
raise ValueError("Missing the required parameter `dataset` when calling `get_release_elements`")
# verify the required parameter 'release' is set
if ('release' not in params) or (params['release'] is None):
raise ValueError("Missing the required parameter `release` when calling `get_release_elements`")
resource_path = '/view/elements/{userId}/{dataset}/{release}'.replace('{format}', 'json')
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
if 'dataset' in params:
path_params['dataset'] = params['dataset']
if 'release' in params:
path_params['release'] = params['release']
query_params = {}
header_params = {}
if 'api_key' in params:
header_params['api_key'] = params['api_key']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DataElement]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_user_subscriptions(self, api_key, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_user_subscriptions(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param int page: The page of results to return
:return: list[Subscription]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_user_subscriptions_with_http_info(api_key, **kwargs)
else:
(data) = self.get_user_subscriptions_with_http_info(api_key, **kwargs)
return data
def get_user_subscriptions_with_http_info(self, api_key, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_user_subscriptions_with_http_info(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param int page: The page of results to return
:return: list[Subscription]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key', 'page']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_subscriptions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key' is set
if ('api_key' not in params) or (params['api_key'] is None):
raise ValueError("Missing the required parameter `api_key` when calling `get_user_subscriptions`")
resource_path = '/view/subscriptions'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
header_params = {}
if 'api_key' in params:
header_params['api_key'] = params['api_key']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Subscription]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_user_views(self, api_key, **kwargs):
"""
Returns view information for datasets of a user
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_user_views(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param int page: The page of results to return
:return: list[DataSetView]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_user_views_with_http_info(api_key, **kwargs)
else:
(data) = self.get_user_views_with_http_info(api_key, **kwargs)
return data
def get_user_views_with_http_info(self, api_key, **kwargs):
"""
Returns view information for datasets of a user
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_user_views_with_http_info(api_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: The user api key (required)
:param int page: The page of results to return
:return: list[DataSetView]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key', 'page']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_views" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key' is set
if ('api_key' not in params) or (params['api_key'] is None):
raise ValueError("Missing the required parameter `api_key` when calling `get_user_views`")
resource_path = '/view/getUserViews'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
if 'api_key' in params:
header_params['api_key'] = params['api_key']
if 'page' in params:
header_params['page'] = params['page']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DataSetView]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
| 40.775391
| 124
| 0.577238
| 2,293
| 20,877
| 5.043175
| 0.090711
| 0.037357
| 0.031131
| 0.024905
| 0.886199
| 0.872449
| 0.867433
| 0.851608
| 0.847458
| 0.839242
| 0
| 0.000798
| 0.33956
| 20,877
| 511
| 125
| 40.855186
| 0.837903
| 0.34253
| 0
| 0.733051
| 1
| 0
| 0.177636
| 0.045436
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038136
| false
| 0
| 0.029661
| 0
| 0.122881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f5e437550b6e23c4c6c0528e99df5fe7e1d328bb
| 251
|
py
|
Python
|
ghidra_9.0/Ghidra/Features/Python/ghidra_scripts/external_module_caller.py
|
ChristopherMorrison/ghidra
|
e53aa57d1aff79a1df93728f746705c58fe95ab0
|
[
"Apache-2.0"
] | 3
|
2019-11-14T13:11:35.000Z
|
2019-12-02T20:51:49.000Z
|
ghidra_9.0/Ghidra/Features/Python/ghidra_scripts/external_module_caller.py
|
ChristopherMorrison/ghidra
|
e53aa57d1aff79a1df93728f746705c58fe95ab0
|
[
"Apache-2.0"
] | 3
|
2019-07-17T22:51:04.000Z
|
2019-12-04T05:43:56.000Z
|
ghidra_9.0/Ghidra/Features/Python/ghidra_scripts/external_module_caller.py
|
ChristopherMorrison/ghidra
|
e53aa57d1aff79a1df93728f746705c58fe95ab0
|
[
"Apache-2.0"
] | 3
|
2019-12-02T13:36:50.000Z
|
2019-12-04T05:40:12.000Z
|
# Example of importing an external Ghidra Python module
# @category: Examples.Python
# Import the external module that wants to access the Ghidra scripting API.
# NOTE: see external_module_callee.py for additional tips.
import external_module_callee
| 35.857143
| 75
| 0.816733
| 36
| 251
| 5.583333
| 0.694444
| 0.208955
| 0.199005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139442
| 251
| 6
| 76
| 41.833333
| 0.930556
| 0.840637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eb0680d9deaf4e2ae4891235b86761bca171cbef
| 157
|
py
|
Python
|
default_fs.py
|
bolaabcd/Polarization2
|
19d2287f3f73a58a20b96108a069169d75bbd820
|
[
"MIT"
] | null | null | null |
default_fs.py
|
bolaabcd/Polarization2
|
19d2287f3f73a58a20b96108a069169d75bbd820
|
[
"MIT"
] | null | null | null |
default_fs.py
|
bolaabcd/Polarization2
|
19d2287f3f73a58a20b96108a069169d75bbd820
|
[
"MIT"
] | null | null | null |
from types import FunctionType
import numpy as np
def same(num_agents : int, f : FunctionType) -> np.ndarray:
    """Return a square num_agents x num_agents array with *f* in every cell.

    Every entry of the result is the very same object ``f``; numpy infers
    the dtype from the fill value (object dtype for a plain function).
    """
    shape = (num_agents, num_agents)
    return np.full(shape, f)
| 31.4
| 59
| 0.745223
| 25
| 157
| 4.56
| 0.64
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152866
| 157
| 5
| 60
| 31.4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
eb1adc145b8f261e3265557093b0dedb90197be5
| 172
|
py
|
Python
|
windows/debug/__init__.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 479
|
2016-01-08T00:53:34.000Z
|
2022-03-22T10:28:19.000Z
|
windows/debug/__init__.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 38
|
2017-12-29T17:09:04.000Z
|
2022-01-31T08:27:47.000Z
|
windows/debug/__init__.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 103
|
2016-01-10T01:32:17.000Z
|
2021-12-24T17:21:06.000Z
|
# Public surface of the windows.debug package: re-export the debugger
# implementations and every breakpoint class from the subpackage modules.
# (A duplicated `from .breakpoints import *` line was removed — the second
# wildcard import was a no-op.)
from .debugger import Debugger, HXBreakpoint
from .symboldbg import SymbolDebugger
from .localdbg import LocalDebugger
from .breakpoints import *
| 34.4
| 44
| 0.837209
| 19
| 172
| 7.578947
| 0.473684
| 0.208333
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 172
| 5
| 45
| 34.4
| 0.947368
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
de3861492bbad87b8ae49e1c2ba0a814242e7975
| 2,599
|
py
|
Python
|
tests/assert_queries_are_equal_test.py
|
edharper01/databricks_test
|
f0560c84bdd0dd3eefa976a1af901927a4f299be
|
[
"MIT"
] | null | null | null |
tests/assert_queries_are_equal_test.py
|
edharper01/databricks_test
|
f0560c84bdd0dd3eefa976a1af901927a4f299be
|
[
"MIT"
] | null | null | null |
tests/assert_queries_are_equal_test.py
|
edharper01/databricks_test
|
f0560c84bdd0dd3eefa976a1af901927a4f299be
|
[
"MIT"
] | 1
|
2020-12-23T20:50:30.000Z
|
2020-12-23T20:50:30.000Z
|
import databricks_test
import pytest
def test_results_match():
    """A query compared against itself must be reported as equal."""
    with databricks_test.session() as dbrickstest:
        # Inline three-row (col1, col2) table built with a VALUES clause.
        query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo'),
(101,'bar'),
(102,'baz')
) AS v (col1, col2)
"""
        # Identical queries: must not raise.
        dbrickstest.assert_queries_are_equal(query, query)
def test_results_do_not_match():
    """Differing result sets must raise with a 'did not match' message."""
    with databricks_test.session() as dbrickstest:
        actual_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo'),
(101,'bar'),
(102,'baz')
) AS v (col1, col2)
"""
        # Second and third rows deliberately differ from actual_query.
        expected_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo'),
(110,'bar'),
(999,'qux')
) AS v (col1, col2)
"""
        with pytest.raises(Exception) as exception_message:
            dbrickstest.assert_queries_are_equal(actual_query, expected_query)
        # Only the message prefix is checked; the diff body follows it.
        assert str(exception_message.value).startswith("the result sets did not match:")
def test_unexpected_result():
    """An extra row in the actual results is flagged with a '>' marker."""
    with databricks_test.session() as dbrickstest:
        # Actual has one extra row (101,'bar') relative to expected.
        actual_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo'),
(101,'bar')
) AS v (col1, col2)
"""
        expected_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo')
) AS v (col1, col2)
"""
        # The full diff table is asserted verbatim: '=' marks a matching
        # row, '>' marks a row present only in the actual result set.
        expected_message="""the result sets did not match:
+---+----+----+
|m  |col1|col2|
+---+----+----+
|=  |100 |foo |
|>  |101 |bar |
+---+----+----+
"""
        with pytest.raises(Exception) as exception_message:
            dbrickstest.assert_queries_are_equal(actual_query, expected_query)
        assert str(exception_message.value)==expected_message
def test_missing_result():
    """A row missing from the actual results is flagged with a '<' marker."""
    with databricks_test.session() as dbrickstest:
        # Expected has one extra row (101,'bar') relative to actual.
        actual_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo')
) AS v (col1, col2)
"""
        expected_query = """
SELECT col1,col2
FROM
(VALUES
(100,'foo'),
(101,'bar')
) AS v (col1, col2)
"""
        # The full diff table is asserted verbatim: '=' marks a matching
        # row, '<' marks a row present only in the expected result set.
        expected_message="""the result sets did not match:
+---+----+----+
|m  |col1|col2|
+---+----+----+
|=  |100 |foo |
|<  |101 |bar |
+---+----+----+
"""
        with pytest.raises(Exception) as exception_message:
            dbrickstest.assert_queries_are_equal(actual_query, expected_query)
        assert str(exception_message.value)==expected_message
| 23.414414
| 88
| 0.517122
| 264
| 2,599
| 4.905303
| 0.181818
| 0.098842
| 0.081081
| 0.102703
| 0.892664
| 0.867954
| 0.849421
| 0.812355
| 0.812355
| 0.812355
| 0
| 0.051534
| 0.335514
| 2,599
| 110
| 89
| 23.627273
| 0.698321
| 0
| 0
| 0.860215
| 0
| 0
| 0.48711
| 0
| 0
| 0
| 0
| 0
| 0.075269
| 1
| 0.043011
| false
| 0
| 0.021505
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
de3fcbb878c3c9f7aff713761feb6ea84a4a9969
| 2,880
|
py
|
Python
|
sanicbe/src/helpers/schema/prod_attribute_value.py
|
fairuztahir/mieagro-be
|
6b591ecbad7d146f2c252677ba09b9afe2e6e921
|
[
"MIT"
] | null | null | null |
sanicbe/src/helpers/schema/prod_attribute_value.py
|
fairuztahir/mieagro-be
|
6b591ecbad7d146f2c252677ba09b9afe2e6e921
|
[
"MIT"
] | null | null | null |
sanicbe/src/helpers/schema/prod_attribute_value.py
|
fairuztahir/mieagro-be
|
6b591ecbad7d146f2c252677ba09b9afe2e6e921
|
[
"MIT"
] | null | null | null |
# Cerberus-style validation rules for creating a product attribute value.
# Every field is mandatory; strings additionally carry a maximum length and
# may not be empty, and the integer id is coerced and bounded below by 1.
prod_attr_value_post_schema = {
    'id': {'type': 'integer', 'required': True, 'coerce': int, 'min': 1},
    'name': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'display_name': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'ptav_active': {'type': 'boolean', 'required': True, 'empty': False},
    'product_attribute_value_id': {'type': 'list', 'required': True},
    'attribute_line_id': {'type': 'list', 'required': True},
    'price_extra': {'type': 'float', 'required': True},
    'exclude_for': {'type': 'list', 'required': True},
    'product_tmpl_id': {'type': 'list', 'required': True},
    'attribute_id': {'type': 'list', 'required': True},
    'ptav_product_variant_ids': {'type': 'list', 'required': True},
    'is_custom': {'type': 'boolean', 'required': True, 'empty': False},
    'display_type': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'create_date': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 20},
}
# Cerberus-style validation rules for updating a product attribute value.
# The rule set mirrors the create schema: all fields are mandatory, strings
# are non-empty and length-bounded, and the integer id is coerced / min 1.
prod_attr_value_upd_schema = {
    'id': {'type': 'integer', 'required': True, 'coerce': int, 'min': 1},
    'name': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'display_name': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'ptav_active': {'type': 'boolean', 'required': True, 'empty': False},
    'product_attribute_value_id': {'type': 'list', 'required': True},
    'attribute_line_id': {'type': 'list', 'required': True},
    'price_extra': {'type': 'float', 'required': True},
    'exclude_for': {'type': 'list', 'required': True},
    'product_tmpl_id': {'type': 'list', 'required': True},
    'attribute_id': {'type': 'list', 'required': True},
    'ptav_product_variant_ids': {'type': 'list', 'required': True},
    'is_custom': {'type': 'boolean', 'required': True, 'empty': False},
    'display_type': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 100},
    'create_date': {'type': 'string', 'required': True, 'empty': False, 'maxlength': 20},
}
| 20.28169
| 35
| 0.431597
| 234
| 2,880
| 5.123932
| 0.179487
| 0.280234
| 0.373645
| 0.200167
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0
| 0.013613
| 0.387847
| 2,880
| 141
| 36
| 20.425532
| 0.666478
| 0
| 0
| 0.771429
| 0
| 0
| 0.343056
| 0.034722
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dee791539dfa8dc18aa2b464da6cefdea026b823
| 30,163
|
py
|
Python
|
components/image-handler/deleter.py
|
Kadantte/selfie2anime
|
0f084d8096bc077d24f2b56385802a74e1529a33
|
[
"MIT"
] | 168
|
2019-08-17T17:51:34.000Z
|
2022-03-25T10:00:33.000Z
|
components/image-handler/deleter.py
|
Kadantte/selfie2anime
|
0f084d8096bc077d24f2b56385802a74e1529a33
|
[
"MIT"
] | 1
|
2021-12-03T04:51:56.000Z
|
2021-12-03T04:51:56.000Z
|
components/image-handler/deleter.py
|
Kadantte/selfie2anime
|
0f084d8096bc077d24f2b56385802a74e1529a33
|
[
"MIT"
] | 28
|
2019-08-18T04:06:23.000Z
|
2022-01-17T13:48:12.000Z
|
import boto3
import os
from string import Template
from time import time
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch
from boto3.dynamodb.conditions import Key
# Instrument the listed libraries with AWS X-Ray tracing.
libraries = (['boto3'])
patch(libraries)
# Deployment-time configuration; raises KeyError at import if unset.
dynamo_table = os.environ['DYNAMO_TABLE']
cloudfront_dist = os.environ['CLOUDFRONT_DIST']
def delete(event, context):
uuid = event['queryStringParameters']['uuid']
key = event['queryStringParameters']['key']
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(dynamo_table)
resp = table.query(
# Add the name of the index you want to use in your query.
IndexName="key-index",
KeyConditionExpression=Key('key').eq(uuid),
)
for item in resp['Items']:
if item['token'] == key:
s3 = boto3.resource('s3')
# Delete outgoing
try:
s3.Object(item['bucket'], 'outgoing/{}'.format(item['key'])).delete()
except Exception as e:
print('Failed to delete S3 outgoing: {}'.format(item['key']))
# Delete incoming-cropped
try:
s3.Object(item['bucket'], 'incoming-cropped/{}'.format(item['key'])).delete()
except Exception as e:
print('Failed to delete S3 incoming-cropped: {}'.format(item['key']))
# Delete incoming
try:
s3.Object(item['bucket'], 'incoming/{}'.format(item['key'])).delete()
except Exception as e:
print('Failed to delete S3 incoming: {}'.format(item['key']))
try:
client = boto3.client('cloudfront')
response = client.create_invalidation(
DistributionId=cloudfront_dist,
InvalidationBatch={
'Paths': {
'Quantity': 1,
'Items': [
'/outgoing/{}'.format(item['key'])
],
},
'CallerReference': str(time()).replace(".", "")
}
)
except Exception as e:
print('Failed to invalidate cache for: {}'.format(item['key']))
html_body = """
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml"
xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta http-equiv="refresh" content="7; url=https://selfie2anime.com/">
<title>Selfie Deleted | Selfie2Anime</title>
<link href="https://fonts.googleapis.com/css?family=Merriweather+Sans:400,400i,700,700i" rel="stylesheet">
<style>
* {
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
html,
body {
margin: 0 auto !important;
padding: 0 !important;
height: 100% !important;
width: 100% !important;
background: #f1f1f1;
}
div[style*="margin: 16px 0"] {
margin: 0 !important;
}
table,
td {
mso-table-lspace: 0pt !important;
mso-table-rspace: 0pt !important;
}
table {
border-spacing: 0 !important;
border-collapse: collapse !important;
table-layout: fixed !important;
margin: 0 auto !important;
}
img {
-ms-interpolation-mode: bicubic;
}
a {
text-decoration: none;
}
*[x-apple-data-detectors],
.unstyle-auto-detected-links *,
.aBn {
border-bottom: 0 !important;
cursor: default !important;
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
.a6S {
display: none !important;
opacity: 0.01 !important;
}
.im {
color: inherit !important;
}
img.g-img+div {
display: none !important;
}
@media only screen and (min-device-width: 320px) and (max-device-width: 374px) {
u~div .email-container {
min-width: 320px !important;
}
}
@media only screen and (min-device-width: 375px) and (max-device-width: 413px) {
u~div .email-container {
min-width: 375px !important;
}
}
@media only screen and (min-device-width: 414px) {
u~div .email-container {
min-width: 414px !important;
}
}
</style>
<style>
.bg_primary {
background: #f06292;
}
.text_primary {
color: #f06292;
font-weight: bold;
}
.try_again {
font-size: 2em;
}
.bg_white {
background: #fff;
}
.bg_dark {
background: rgba(0, 0, 0, .8);
}
.email-section {
padding: 2.5em;
}
h1,
h2,
h3,
h4,
h5,
h6 {
font-family: 'Merriweather Sans', sans-serif;
color: #000;
margin-top: 0;
}
body {
font-family: 'Merriweather Sans', sans-serif;
font-weight: 400;
font-size: 18px;
line-height: 1.8;
color: rgba(0, 0, 0, .7);
}
a {
color: #f06292;
font-weight: bold;
}
.logo h1 {
margin: 0;
}
.logo h1 a {
color: #000;
font-size: 24px;
font-weight: 700;
text-transform: uppercase;
font-family: 'Merriweather Sans', sans-serif;
}
.navigation {
padding: 0;
}
.navigation li {
list-style: none;
display: inline-block;
;
margin-left: 5px;
font-size: 12px;
font-weight: 700;
text-transform: uppercase;
}
.navigation li a {
color: rgba(0, 0, 0, .6);
}
.heading-section h2 {
color: #000;
font-size: 28px;
margin-top: 0;
line-height: 1.4;
font-weight: 700;
}
.heading-section .subheading {
margin-bottom: 20px !important;
display: inline-block;
font-size: 13px;
text-transform: uppercase;
letter-spacing: 2px;
color: rgba(0, 0, 0, .4);
position: relative;
}
.heading-section .subheading::after {
position: absolute;
left: 0;
right: 0;
bottom: -10px;
content: '';
width: 100%;
height: 2px;
background: #f5564e;
margin: 0 auto;
}
.heading-section-white {
color: rgba(255, 255, 255, .8);
}
.heading-section-white h2 {
line-height: 1;
padding-bottom: 0;
}
.heading-section-white h2 {
color: #fff;
}
.heading-section-white .subheading {
margin-bottom: 0;
display: inline-block;
font-size: 13px;
text-transform: uppercase;
letter-spacing: 2px;
color: rgba(255, 255, 255, .4);
}
.call-to-action {
padding: 1em 2em;
background: #f06292;
color: #fff;
border-radius: 999px;
}
</style>
</head>
<body width="100%" style="margin: 0; padding: 0 !important; mso-line-height-rule: exactly; background-color: #222;">
<center style="width: 100%; background-color: #f1f1f1;">
<div
style="display: none; font-size: 1px;max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden; mso-hide: all; font-family: sans-serif;">
‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌
</div>
<div style="max-width: 600px; margin: 0 auto;" class="email-container">
<table align="center" role="presentation" cellspacing="0" cellpadding="0" border="0" width="100%"
style="margin: auto;">
<tr>
<td valign="top" class="bg_white" style="padding: 1em 2.5em;">
<table role="presentation" border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<td width="60%" class="logo" style="text-align: left;">
<h1>
<a href="https://selfie2anime.com">
Selfie<span class="text_primary">2</span>Anime <span
class="text_primary">アニメ</span>
</a>
</h1>
</td>
<td width="40%" class="logo" style="text-align: right;">
<a href="https://www.facebook.com/sharer/sharer.php?u=https://selfie2anime.com">
<img width="32" height="32" src="https://selfie2anime.com/email/facebook.png"
alt="Share on Facebook">
</a>
<a href="https://twitter.com/intent/tweet?url=https://selfie2anime.com&text=What do YOU look like in anime?&hashtags=selfie2anime"
target="_blank">
<img width="32" height="32" src="https://selfie2anime.com/email/twitter.png"
alt="Share on Twitter">
</a>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td valign="middle" class="bg_white" style="padding: 80px 0;">
<div class="overlay"></div>
<table>
<tr>
<td>
<div class="text" style="text-align: center; margin: 0 20px">
Your anime-selfie has been <b>permanently deleted</b>.
</div>
<div class="text" style="text-align: center; margin: 4em 0">
<a href="https://selfie2anime.com/" class="call-to-action">
GENERATE ANOTHER ONE!
</a>
</div>
<div class="text" style="text-align: center; font-size: 8pt; margin: 0 20px">
You will be automatically redirected to <a
href="https://selfie2anime.com/">selfie2anime.com</a>.
</div>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td valign="middle" class="bg_primary"
style="background-image: url(https://selfie2anime.com/email/wall.jpg); background-size: cover; height: 480px;">
</td>
</tr>
<tr>
<td class="bg_dark email-section" style="text-align:center;">
<div class="heading-section heading-section-white">
<p>
Copyright © 2019-2020 by
<a href="https://selfie2anime.com">Selfie2Anime.com</a>
</p>
</div>
</td>
</tr>
</table>
</div>
</center>
</body>
</html>
"""
else:
html_body = """
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml"
xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Selfie Error | Selfie2Anime</title>
<link href="https://fonts.googleapis.com/css?family=Merriweather+Sans:400,400i,700,700i" rel="stylesheet">
<style>
* {
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
html,
body {
margin: 0 auto !important;
padding: 0 !important;
height: 100% !important;
width: 100% !important;
background: #f1f1f1;
}
div[style*="margin: 16px 0"] {
margin: 0 !important;
}
table,
td {
mso-table-lspace: 0pt !important;
mso-table-rspace: 0pt !important;
}
table {
border-spacing: 0 !important;
border-collapse: collapse !important;
table-layout: fixed !important;
margin: 0 auto !important;
}
img {
-ms-interpolation-mode: bicubic;
}
a {
text-decoration: none;
}
*[x-apple-data-detectors],
.unstyle-auto-detected-links *,
.aBn {
border-bottom: 0 !important;
cursor: default !important;
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
.a6S {
display: none !important;
opacity: 0.01 !important;
}
.im {
color: inherit !important;
}
img.g-img+div {
display: none !important;
}
@media only screen and (min-device-width: 320px) and (max-device-width: 374px) {
u~div .email-container {
min-width: 320px !important;
}
}
@media only screen and (min-device-width: 375px) and (max-device-width: 413px) {
u~div .email-container {
min-width: 375px !important;
}
}
@media only screen and (min-device-width: 414px) {
u~div .email-container {
min-width: 414px !important;
}
}
</style>
<style>
.bg_primary {
background: #f06292;
}
.text_primary {
color: #f06292;
font-weight: bold;
}
.try_again {
font-size: 2em;
}
.bg_white {
background: #fff;
}
.bg_dark {
background: rgba(0, 0, 0, .8);
}
.email-section {
padding: 2.5em;
}
h1,
h2,
h3,
h4,
h5,
h6 {
font-family: 'Merriweather Sans', sans-serif;
color: #000;
margin-top: 0;
}
body {
font-family: 'Merriweather Sans', sans-serif;
font-weight: 400;
font-size: 18px;
line-height: 1.8;
color: rgba(0, 0, 0, .7);
}
a {
color: #f06292;
font-weight: bold;
}
.logo h1 {
margin: 0;
}
.logo h1 a {
color: #000;
font-size: 24px;
font-weight: 700;
text-transform: uppercase;
font-family: 'Merriweather Sans', sans-serif;
}
.navigation {
padding: 0;
}
.navigation li {
list-style: none;
display: inline-block;
;
margin-left: 5px;
font-size: 12px;
font-weight: 700;
text-transform: uppercase;
}
.navigation li a {
color: rgba(0, 0, 0, .6);
}
.heading-section h2 {
color: #000;
font-size: 28px;
margin-top: 0;
line-height: 1.4;
font-weight: 700;
}
.heading-section .subheading {
margin-bottom: 20px !important;
display: inline-block;
font-size: 13px;
text-transform: uppercase;
letter-spacing: 2px;
color: rgba(0, 0, 0, .4);
position: relative;
}
.heading-section .subheading::after {
position: absolute;
left: 0;
right: 0;
bottom: -10px;
content: '';
width: 100%;
height: 2px;
background: #f5564e;
margin: 0 auto;
}
.heading-section-white {
color: rgba(255, 255, 255, .8);
}
.heading-section-white h2 {
line-height: 1;
padding-bottom: 0;
}
.heading-section-white h2 {
color: #fff;
}
.heading-section-white .subheading {
margin-bottom: 0;
display: inline-block;
font-size: 13px;
text-transform: uppercase;
letter-spacing: 2px;
color: rgba(255, 255, 255, .4);
}
.call-to-action {
padding: 1em 2em;
background: #f06292;
color: #fff;
border-radius: 999px;
}
</style>
</head>
<body width="100%" style="margin: 0; padding: 0 !important; mso-line-height-rule: exactly; background-color: #222;">
<center style="width: 100%; background-color: #f1f1f1;">
<div
style="display: none; font-size: 1px;max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden; mso-hide: all; font-family: sans-serif;">
‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌
</div>
<div style="max-width: 600px; margin: 0 auto;" class="email-container">
<table align="center" role="presentation" cellspacing="0" cellpadding="0" border="0" width="100%"
style="margin: auto;">
<tr>
<td valign="top" class="bg_white" style="padding: 1em 2.5em;">
<table role="presentation" border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<td width="60%" class="logo" style="text-align: left;">
<h1>
<a href="https://selfie2anime.com">
Selfie<span class="text_primary">2</span>Anime <span
class="text_primary">アニメ</span>
</a>
</h1>
</td>
<td width="40%" class="logo" style="text-align: right;">
<a href="https://www.facebook.com/sharer/sharer.php?u=https://selfie2anime.com">
<img width="32" height="32" src="https://selfie2anime.com/email/facebook.png"
alt="Share on Facebook">
</a>
<a href="https://twitter.com/intent/tweet?url=https://selfie2anime.com&text=What do YOU look like in anime?&hashtags=selfie2anime"
target="_blank">
<img width="32" height="32" src="https://selfie2anime.com/email/twitter.png"
alt="Share on Twitter">
</a>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td valign="middle" class="bg_white" style="padding: 80px 0;">
<div class="overlay"></div>
<table>
<tr>
<td>
<div class="text" style="text-align: center; margin: 0 20px">
Your verification token was invalid.
</div>
<div class="text" style="font-size: 10px; text-align: center; margin: 0 50px">
Make sure you clicked the delete image button from the email you received. If that doesn't work, contact us at <a href="mailto:legal@selfie2anime.com">legal@selfie2anime.com</a> to have your data removed another way.</div>
</div>
<div class="text" style="text-align: center; margin: 4em 0">
<a href="https://selfie2anime.com/" class="call-to-action">
GENERATE ANOTHER ONE!
</a>
</div>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td valign="middle" class="bg_primary"
style="background-image: url(https://selfie2anime.com/email/wall.jpg); background-size: cover; height: 480px;">
</td>
</tr>
<tr>
<td class="bg_dark email-section" style="text-align:center;">
<div class="heading-section heading-section-white">
<p>
Copyright © 2019-2020 by
<a href="https://selfie2anime.com">Selfie2Anime.com</a>
</p>
</div>
</td>
</tr>
</table>
</div>
</center>
</body>
</html>
"""
response = {
"statusCode": 200,
"body": html_body,
"headers": {
'Content-Type': 'text/html',
}
}
return response
| 42.245098
| 274
| 0.340981
| 2,154
| 30,163
| 4.756732
| 0.167595
| 0.028109
| 0.03982
| 0.053094
| 0.880344
| 0.869217
| 0.855651
| 0.852625
| 0.845598
| 0.845598
| 0
| 0.046658
| 0.563008
| 30,163
| 713
| 275
| 42.304348
| 0.730673
| 0.003713
| 0
| 0.770968
| 0
| 0.053226
| 0.935
| 0.061006
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001613
| false
| 0
| 0.101613
| 0
| 0.104839
| 0.006452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
72529c2f409e92731e3ff330003b4dd7f542349a
| 61,555
|
py
|
Python
|
cloudmersive_convert_api_client/api/edit_html_api.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2018-07-25T23:04:34.000Z
|
2021-08-10T16:43:10.000Z
|
cloudmersive_convert_api_client/api/edit_html_api.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2020-11-23T10:46:48.000Z
|
2021-12-30T14:09:34.000Z
|
cloudmersive_convert_api_client/api/edit_html_api.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 2
|
2020-01-07T09:48:01.000Z
|
2020-11-23T10:47:00.000Z
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_convert_api_client.api_client import ApiClient
class EditHtmlApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def edit_html_html_append_header_tag(self, html_tag, **kwargs): # noqa: E501
"""Append an HTML tag to the HEAD section of an HTML Document # noqa: E501
Appends an HTML tag to the HEAD section of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_header_tag(html_tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str html_tag: The HTML tag to append. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_append_header_tag_with_http_info(html_tag, **kwargs) # noqa: E501
else:
(data) = self.edit_html_html_append_header_tag_with_http_info(html_tag, **kwargs) # noqa: E501
return data
def edit_html_html_append_header_tag_with_http_info(self, html_tag, **kwargs): # noqa: E501
"""Append an HTML tag to the HEAD section of an HTML Document # noqa: E501
Appends an HTML tag to the HEAD section of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_header_tag_with_http_info(html_tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str html_tag: The HTML tag to append. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['html_tag', 'input_file', 'input_file_url'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_append_header_tag" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'html_tag' is set
if ('html_tag' not in params or
params['html_tag'] is None):
raise ValueError("Missing the required parameter `html_tag` when calling `edit_html_html_append_header_tag`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'input_file_url' in params:
header_params['inputFileUrl'] = params['input_file_url'] # noqa: E501
if 'html_tag' in params:
header_params['htmlTag'] = params['html_tag'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/head/append/tag', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_append_heading(self, heading_text, **kwargs): # noqa: E501
"""Append a Heading to an HTML Document # noqa: E501
Appends a heading to the end of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_heading(heading_text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str heading_text: The text content to be used in the header. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param int heading_size: Optional: The heading size number. Default is 1. Accepts values between 1 and 6.
:param str css_style: Optional: The CSS style for the heading.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_append_heading_with_http_info(heading_text, **kwargs) # noqa: E501
else:
(data) = self.edit_html_html_append_heading_with_http_info(heading_text, **kwargs) # noqa: E501
return data
def edit_html_html_append_heading_with_http_info(self, heading_text, **kwargs): # noqa: E501
"""Append a Heading to an HTML Document # noqa: E501
Appends a heading to the end of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_heading_with_http_info(heading_text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str heading_text: The text content to be used in the header. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param int heading_size: Optional: The heading size number. Default is 1. Accepts values between 1 and 6.
:param str css_style: Optional: The CSS style for the heading.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['heading_text', 'input_file', 'input_file_url', 'heading_size', 'css_style'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_append_heading" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'heading_text' is set
if ('heading_text' not in params or
params['heading_text'] is None):
raise ValueError("Missing the required parameter `heading_text` when calling `edit_html_html_append_heading`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'input_file_url' in params:
header_params['inputFileUrl'] = params['input_file_url'] # noqa: E501
if 'heading_text' in params:
header_params['headingText'] = params['heading_text'] # noqa: E501
if 'heading_size' in params:
header_params['headingSize'] = params['heading_size'] # noqa: E501
if 'css_style' in params:
header_params['cssStyle'] = params['css_style'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/append/heading', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_append_image_from_url(self, image_url, **kwargs): # noqa: E501
"""Append an Image to an HTML Document from a URL # noqa: E501
Appends an image to the end of an HTML document using a URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_image_from_url(image_url, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_url: The URL for the image. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param str css_style: Optional: CSS style for the image.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_append_image_from_url_with_http_info(image_url, **kwargs) # noqa: E501
else:
(data) = self.edit_html_html_append_image_from_url_with_http_info(image_url, **kwargs) # noqa: E501
return data
def edit_html_html_append_image_from_url_with_http_info(self, image_url, **kwargs): # noqa: E501
"""Append an Image to an HTML Document from a URL # noqa: E501
Appends an image to the end of an HTML document using a URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_image_from_url_with_http_info(image_url, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_url: The URL for the image. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param str css_style: Optional: CSS style for the image.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image_url', 'input_file', 'input_file_url', 'css_style'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_append_image_from_url" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'image_url' is set
if ('image_url' not in params or
params['image_url'] is None):
raise ValueError("Missing the required parameter `image_url` when calling `edit_html_html_append_image_from_url`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'input_file_url' in params:
header_params['inputFileUrl'] = params['input_file_url'] # noqa: E501
if 'image_url' in params:
header_params['imageUrl'] = params['image_url'] # noqa: E501
if 'css_style' in params:
header_params['cssStyle'] = params['css_style'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/append/image/from-url', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_append_image_inline(self, **kwargs): # noqa: E501
"""Append a Base64 Inline Image to an HTML Document # noqa: E501
Appends a base64 inline image to the end of an HTML document from an input file or URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_image_inline(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param file image_file: Optional: Image file to be appended as base64 inline image.
:param str image_url: Optional: Image URL to be appended as base64 inline image.
:param str css_style: Optional: CSS style for the image.
:param str image_extension: Optional: The extension (JPG, PNG, GIF, etc.) of the image file. Recommended if uploading an imageFile directly, instead of using imageUrl. If no extension can be determined, will default to JPG.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_append_image_inline_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.edit_html_html_append_image_inline_with_http_info(**kwargs) # noqa: E501
return data
def edit_html_html_append_image_inline_with_http_info(self, **kwargs): # noqa: E501
"""Append a Base64 Inline Image to an HTML Document # noqa: E501
Appends a base64 inline image to the end of an HTML document from an input file or URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_image_inline_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param file image_file: Optional: Image file to be appended as base64 inline image.
:param str image_url: Optional: Image URL to be appended as base64 inline image.
:param str css_style: Optional: CSS style for the image.
:param str image_extension: Optional: The extension (JPG, PNG, GIF, etc.) of the image file. Recommended if uploading an imageFile directly, instead of using imageUrl. If no extension can be determined, will default to JPG.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file', 'input_file_url', 'image_file', 'image_url', 'css_style', 'image_extension'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_append_image_inline" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'input_file_url' in params:
header_params['inputFileUrl'] = params['input_file_url'] # noqa: E501
if 'image_url' in params:
header_params['imageUrl'] = params['image_url'] # noqa: E501
if 'css_style' in params:
header_params['cssStyle'] = params['css_style'] # noqa: E501
if 'image_extension' in params:
header_params['imageExtension'] = params['image_extension'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
if 'image_file' in params:
local_var_files['imageFile'] = params['image_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/append/image/inline', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_append_paragraph(self, paragraph_text, **kwargs): # noqa: E501
"""Append a Paragraph to an HTML Document # noqa: E501
Appends a paragraph to the end of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_paragraph(paragraph_text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str paragraph_text: The text content to be used in the paragraph. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param str css_style: Optional: The CSS style for the paragraph.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_append_paragraph_with_http_info(paragraph_text, **kwargs) # noqa: E501
else:
(data) = self.edit_html_html_append_paragraph_with_http_info(paragraph_text, **kwargs) # noqa: E501
return data
def edit_html_html_append_paragraph_with_http_info(self, paragraph_text, **kwargs): # noqa: E501
"""Append a Paragraph to an HTML Document # noqa: E501
Appends a paragraph to the end of an HTML document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_append_paragraph_with_http_info(paragraph_text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str paragraph_text: The text content to be used in the paragraph. (required)
:param file input_file: Optional: Input file to perform the operation on.
:param str input_file_url: Optional: URL of a file to operate on as input.
:param str css_style: Optional: The CSS style for the paragraph.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['paragraph_text', 'input_file', 'input_file_url', 'css_style'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_append_paragraph" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'paragraph_text' is set
if ('paragraph_text' not in params or
params['paragraph_text'] is None):
raise ValueError("Missing the required parameter `paragraph_text` when calling `edit_html_html_append_paragraph`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'input_file_url' in params:
header_params['inputFileUrl'] = params['input_file_url'] # noqa: E501
if 'paragraph_text' in params:
header_params['paragraphText'] = params['paragraph_text'] # noqa: E501
if 'css_style' in params:
header_params['cssStyle'] = params['css_style'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/append/paragraph', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_create_blank_document(self, **kwargs): # noqa: E501
"""Create a Blank HTML Document # noqa: E501
Returns a blank HTML Document format file. The file is blank, with no contents by default. Use the optional input parameters to add various starting elements. Use additional editing commands such as Append Header, Append Paragraph or Append Image from URL to populate the document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_create_blank_document(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str title: Optional: The title of the HTML document
:param str css_url: Optional: A CSS style URL to be added to the document.
:param str css_inline: Optional: An inline CSS style to be added to the document.
:param str javascript_url: Optional: Javascript URL to be added to the document.
:param str javascript_inline: Optional: Inline Javascript to be added to the document.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.edit_html_html_create_blank_document_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.edit_html_html_create_blank_document_with_http_info(**kwargs) # noqa: E501
return data
def edit_html_html_create_blank_document_with_http_info(self, **kwargs): # noqa: E501
"""Create a Blank HTML Document # noqa: E501
Returns a blank HTML Document format file. The file is blank, with no contents by default. Use the optional input parameters to add various starting elements. Use additional editing commands such as Append Header, Append Paragraph or Append Image from URL to populate the document. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.edit_html_html_create_blank_document_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str title: Optional: The title of the HTML document
:param str css_url: Optional: A CSS style URL to be added to the document.
:param str css_inline: Optional: An inline CSS style to be added to the document.
:param str javascript_url: Optional: Javascript URL to be added to the document.
:param str javascript_inline: Optional: Inline Javascript to be added to the document.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['title', 'css_url', 'css_inline', 'javascript_url', 'javascript_inline'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_html_html_create_blank_document" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'title' in params:
header_params['title'] = params['title'] # noqa: E501
if 'css_url' in params:
header_params['cssUrl'] = params['css_url'] # noqa: E501
if 'css_inline' in params:
header_params['cssInline'] = params['css_inline'] # noqa: E501
if 'javascript_url' in params:
header_params['javascriptUrl'] = params['javascript_url'] # noqa: E501
if 'javascript_inline' in params:
header_params['javascriptInline'] = params['javascript_inline'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/edit/html/create/blank', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def edit_html_html_get_language(self, **kwargs):  # noqa: E501
    """Gets the language for the HTML document  # noqa: E501

    Retrieves the language code (e.g. "en" or "de") of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_language(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetLanguageResult
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_get_language_with_http_info(**kwargs)  # noqa: E501
def edit_html_html_get_language_with_http_info(self, **kwargs):  # noqa: E501
    """Gets the language for the HTML document  # noqa: E501

    Retrieves the language code (e.g. "en" or "de") of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_language_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetLanguageResult
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_get_language" % name
            )
        opts[name] = value

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    if 'input_file_url' in opts:
        headers['inputFileUrl'] = opts['input_file_url']  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/get/language', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='HtmlGetLanguageResult',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_get_links(self, **kwargs):  # noqa: E501
    """Extract resolved link URLs from HTML File  # noqa: E501

    Extracts the resolved link URLs, fully-qualified if possible, from an input HTML file.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_links(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :param str base_url: Optional: Base URL of the page, such as https://mydomain.com
    :return: HtmlGetLinksResponse
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_get_links_with_http_info(**kwargs)  # noqa: E501
def edit_html_html_get_links_with_http_info(self, **kwargs):  # noqa: E501
    """Extract resolved link URLs from HTML File  # noqa: E501

    Extracts the resolved link URLs, fully-qualified if possible, from an input HTML file.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_links_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :param str base_url: Optional: Base URL of the page, such as https://mydomain.com
    :return: HtmlGetLinksResponse
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'input_file',
        'input_file_url',
        'base_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_get_links" % name
            )
        opts[name] = value

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    for local_name, header_name in (('input_file_url', 'inputFileUrl'),
                                    ('base_url', 'baseUrl')):
        if local_name in opts:
            headers[header_name] = opts[local_name]  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/extract/links', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='HtmlGetLinksResponse',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_get_rel_canonical(self, **kwargs):  # noqa: E501
    """Gets the rel canonical URL for the HTML document  # noqa: E501

    Gets the rel canonical URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_rel_canonical(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetRelCanonicalUrlResult
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_get_rel_canonical_with_http_info(**kwargs)  # noqa: E501
def edit_html_html_get_rel_canonical_with_http_info(self, **kwargs):  # noqa: E501
    """Gets the rel canonical URL for the HTML document  # noqa: E501

    Gets the rel canonical URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_rel_canonical_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetRelCanonicalUrlResult
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_get_rel_canonical" % name
            )
        opts[name] = value

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    if 'input_file_url' in opts:
        headers['inputFileUrl'] = opts['input_file_url']  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/get/rel-canonical-url', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='HtmlGetRelCanonicalUrlResult',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_get_sitemap(self, **kwargs):  # noqa: E501
    """Gets the sitemap URL for the HTML document  # noqa: E501

    Gets the sitemap link URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_sitemap(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetSitemapUrlResult
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_get_sitemap_with_http_info(**kwargs)  # noqa: E501
def edit_html_html_get_sitemap_with_http_info(self, **kwargs):  # noqa: E501
    """Gets the sitemap URL for the HTML document  # noqa: E501

    Gets the sitemap link URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_get_sitemap_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: HtmlGetSitemapUrlResult
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_get_sitemap" % name
            )
        opts[name] = value

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    if 'input_file_url' in opts:
        headers['inputFileUrl'] = opts['input_file_url']  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/get/sitemap-url', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='HtmlGetSitemapUrlResult',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_set_language(self, language_code, **kwargs):  # noqa: E501
    """Sets the language for the HTML document  # noqa: E501

    Sets the language code of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_language(language_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str language_code: The HTML language code to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_set_language_with_http_info(language_code, **kwargs)  # noqa: E501
def edit_html_html_set_language_with_http_info(self, language_code, **kwargs):  # noqa: E501
    """Sets the language for the HTML document  # noqa: E501

    Sets the language code of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_language_with_http_info(language_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str language_code: The HTML language code to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'language_code',
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {'language_code': language_code}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_set_language" % name
            )
        opts[name] = value

    # verify the required parameter 'language_code' is set
    if opts['language_code'] is None:
        raise ValueError("Missing the required parameter `language_code` when calling `edit_html_html_set_language`")  # noqa: E501

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    for local_name, header_name in (('input_file_url', 'inputFileUrl'),
                                    ('language_code', 'languageCode')):
        if local_name in opts:
            headers[header_name] = opts[local_name]  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/set/language', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='str',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_set_rel_canonical(self, canonical_url, **kwargs):  # noqa: E501
    """Sets the rel canonical URL for the HTML document  # noqa: E501

    Sets the rel canonical URL of an HTML document. This is useful for telling search engines and other indexers which pages are duplicates of eachother; any pages with the rel=canonical tag will be treated as duplicates of the canonical URL.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_rel_canonical(canonical_url, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str canonical_url: The HTML canonical URL to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_set_rel_canonical_with_http_info(canonical_url, **kwargs)  # noqa: E501
def edit_html_html_set_rel_canonical_with_http_info(self, canonical_url, **kwargs):  # noqa: E501
    """Sets the rel canonical URL for the HTML document  # noqa: E501

    Sets the rel canonical URL of an HTML document. This is useful for telling search engines and other indexers which pages are duplicates of eachother; any pages with the rel=canonical tag will be treated as duplicates of the canonical URL.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_rel_canonical_with_http_info(canonical_url, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str canonical_url: The HTML canonical URL to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'canonical_url',
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {'canonical_url': canonical_url}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_set_rel_canonical" % name
            )
        opts[name] = value

    # verify the required parameter 'canonical_url' is set
    if opts['canonical_url'] is None:
        raise ValueError("Missing the required parameter `canonical_url` when calling `edit_html_html_set_rel_canonical`")  # noqa: E501

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    for local_name, header_name in (('input_file_url', 'inputFileUrl'),
                                    ('canonical_url', 'canonicalUrl')):
        if local_name in opts:
            headers[header_name] = opts[local_name]  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/set/rel-canonical-url', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='str',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
def edit_html_html_set_sitemap_url(self, sitemap_url, **kwargs):  # noqa: E501
    """Sets the sitemap URL for the HTML document  # noqa: E501

    Sets the sitemap URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_sitemap_url(sitemap_url, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str sitemap_url: The HTML sitemap URL to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Only the data payload is wanted from the low-level call; both the
    # synchronous and the async_req=True paths simply return its result.
    kwargs['_return_http_data_only'] = True
    return self.edit_html_html_set_sitemap_url_with_http_info(sitemap_url, **kwargs)  # noqa: E501
def edit_html_html_set_sitemap_url_with_http_info(self, sitemap_url, **kwargs):  # noqa: E501
    """Sets the sitemap URL for the HTML document  # noqa: E501

    Sets the sitemap URL of an HTML document.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.edit_html_html_set_sitemap_url_with_http_info(sitemap_url, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str sitemap_url: The HTML sitemap URL to set. (required)
    :param file input_file: Optional: Input file to perform the operation on.
    :param str input_file_url: Optional: URL of a file to operate on as input.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    accepted = [
        'sitemap_url',
        'input_file',
        'input_file_url',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments and collect the known ones.
    opts = {'sitemap_url': sitemap_url}
    for name, value in six.iteritems(kwargs):
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_html_html_set_sitemap_url" % name
            )
        opts[name] = value

    # verify the required parameter 'sitemap_url' is set
    if opts['sitemap_url'] is None:
        raise ValueError("Missing the required parameter `sitemap_url` when calling `edit_html_html_set_sitemap_url`")  # noqa: E501

    # The endpoint carries its inputs in HTTP headers and multipart files.
    headers = {}
    for local_name, header_name in (('input_file_url', 'inputFileUrl'),
                                    ('sitemap_url', 'sitemapUrl')):
        if local_name in opts:
            headers[header_name] = opts[local_name]  # noqa: E501
    # HTTP header `Accept`
    headers['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    upload_files = {}
    if 'input_file' in opts:
        upload_files['inputFile'] = opts['input_file']  # noqa: E501

    return self.api_client.call_api(
        '/convert/edit/html/head/set/sitemap-url', 'POST',
        {},  # no path params
        [],  # no query params
        headers,
        body=None,
        post_params=[],
        files=upload_files,
        response_type='str',  # noqa: E501
        auth_settings=['Apikey'],  # noqa: E501
        async_req=opts.get('async_req'),
        _return_http_data_only=opts.get('_return_http_data_only'),
        _preload_content=opts.get('_preload_content', True),
        _request_timeout=opts.get('_request_timeout'),
        collection_formats={})
| 44.669811
| 306
| 0.632491
| 7,692
| 61,555
| 4.804602
| 0.035621
| 0.045675
| 0.031821
| 0.025327
| 0.955543
| 0.942582
| 0.933084
| 0.923587
| 0.917878
| 0.905837
| 0
| 0.014986
| 0.282365
| 61,555
| 1,377
| 307
| 44.702251
| 0.821637
| 0.382
| 0
| 0.772169
| 1
| 0
| 0.215174
| 0.058558
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036835
| false
| 0
| 0.005457
| 0
| 0.096862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a0fcb9c661abce26b7869dfa7de6b88f032eff9d
| 127
|
py
|
Python
|
ravegen/generateAp.py
|
mytab0r/RaveGen-Telegram-bot-generator
|
4b42ae622554c4b2442b35b1181f8f09886215d2
|
[
"MIT"
] | 1
|
2020-06-13T17:16:57.000Z
|
2020-06-13T17:16:57.000Z
|
ravegen/generateAp.py
|
NICK-FTW/RaveGen-Telegram-bot-generator
|
269b36333a31cadb697f3c1250c6bf118cdc7fcc
|
[
"MIT"
] | 5
|
2019-04-03T19:10:54.000Z
|
2019-06-14T17:21:14.000Z
|
ravegen/generateAp.py
|
NICK-FTW/RaveGen-Telegram-bot-generator
|
269b36333a31cadb697f3c1250c6bf118cdc7fcc
|
[
"MIT"
] | 2
|
2019-03-19T19:45:05.000Z
|
2021-02-07T18:04:33.000Z
|
import ConsoleEngine.generateAutoCompletition as generateAutoCompletition
generateAutoCompletition.generateAutoCompletition()
| 31.75
| 73
| 0.92126
| 7
| 127
| 16.714286
| 0.571429
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047244
| 127
| 3
| 74
| 42.333333
| 0.966942
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
9d0648ef5855f2000c98a0fb0edac445c5e5a2a2
| 104,677
|
py
|
Python
|
models/H_Net.py
|
zxg3017/H-Net
|
30c3c08292f4fccf76a63341392488b8baee3d3d
|
[
"MIT"
] | null | null | null |
models/H_Net.py
|
zxg3017/H-Net
|
30c3c08292f4fccf76a63341392488b8baee3d3d
|
[
"MIT"
] | null | null | null |
models/H_Net.py
|
zxg3017/H-Net
|
30c3c08292f4fccf76a63341392488b8baee3d3d
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from torch.autograd import Variable
from functools import partial
from segmentation_models_pytorch import create_model
import segmentation_models_pytorch as smp
# In-place ReLU used as the activation for the CE-Net-style decoder heads
# defined in the classes below (e.g. finalrelu1/finalrelu2).
nonlinearity = partial(F.relu, inplace=True)
# Batch-norm epsilon; presumably consumed by norm layers defined elsewhere
# in this file — TODO confirm (not referenced within this chunk).
BN_EPS = 1e-4  # values previously tried: 1e-4, 1e-5
class H_Net_V1(nn.Module):
    """Dual-branch segmentation network, version 1.

    Combines an M_Net-style multi-scale encoder/decoder with a CE-Net-style
    branch built on a pretrained ResNet-34 encoder. The two branches exchange
    features at the center; each branch produces its own prediction map and
    the forward pass also returns their average.

    NOTE(review): depends on M_Conv, M_Encoder, DACblock, SPPblock,
    CACblock_with_inception, M_Decoder_my_10 and DecoderBlock defined
    elsewhere in this file. The per-line shape comments below assume
    3-channel, 512x512 square inputs — TODO confirm against callers.
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        """Build both branches.

        :param in_ch: number of input image channels.
        :param out_ch: number of output (prediction) channels.
        :param bn: forwarded to the M_* building blocks.
        :param BatchNorm: forwarded to the M_* building blocks.
        """
        super(H_Net_V1, self).__init__()
        # multi-scale simple convolutions applied to downscaled copies of x
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # ce_net encoder part: reuse the pretrained ResNet-34 stem and stages
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (constructed but not used in forward below)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of ce_Net
        self.dblock = DACblock(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pooling followed by upsampling
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 516 in-channels: SPPblock presumably appends 4 channels to its
        # 512-channel input — TODO confirm against SPPblock's definition.
        self.e4_up = nn.ConvTranspose2d(516, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC block
        self.CAC = CACblock_with_inception(256)
        self.CAC_Ce = CACblock_with_inception(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput: 1x1 convolutions mapping features to out_ch maps
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions that project M_Net skip features onto the CE-Net
        # decoder's channel counts before the element-wise additions in forward
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of ce_net
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoder part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        """Run both branches on ``x`` and fuse their predictions.

        :param x: input batch (N, C, H, W); assumes H == W — only one
            spatial dimension is read below. TODO confirm.
        :return: 6-tuple ``(sigmoid(ce_out), sigmoid(m_out), sigmoid(avg),
            ce_out, m_out, avg)`` — sigmoid-activated and raw logits for the
            CE-Net branch, the M_Net branch, and their average.

        NOTE(review): F.upsample and F.sigmoid are deprecated aliases of
        F.interpolate / torch.sigmoid in current PyTorch.
        """
        # M_Net Encoder Part: build a pyramid of downscaled inputs
        l_x = x
        _, _, img_shape, _ = l_x.size()
        x_2 = F.upsample(l_x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(l_x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(l_x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # cet_out = self.center(out)
        # CAC_out = self.CAC(out4)
        # CE_Net Encoder part: pretrained ResNet-34 stem and stages
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)  # [64,256,256]
        e0 = self.firstrelu(e0)  # [64,256,256]
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # Center of CE_Net: dilated convolutions then spatial pyramid pooling
        # e4 = self.CAC_Ce(e4)
        e4 = self.dblock(e4)
        e4 = self.spp(e4)
        # the center part: fuse the CE-Net center into the M_Net branch
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # M_Net decoder: repeatedly concat a ResNet feature, upsample, decode
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # e4 = self.spp(e4)
        # Encoder of CE_Net (decoder path, with projected M_Net skips added)
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)  # reuse of name: now the CE-Net logits
        ave_out = (cet_out + M_Net_out) /2
        return F.sigmoid(cet_out),F.sigmoid(M_Net_out),F.sigmoid(ave_out),cet_out,M_Net_out,ave_out
class H_Net_V2(nn.Module):
    """Two-branch hybrid segmentation network (M_Net branch + CE-Net branch).

    The M_Net branch encodes the input at four scales with multi-scale
    convolutions; the CE-Net branch uses a pretrained ResNet-34 encoder
    followed by a CAC center block and an SPP block.  The two branches are
    fused at the center and each one produces a segmentation map.

    Args:
        in_ch: number of input image channels.
        out_ch: number of output segmentation channels.
        bn, BatchNorm: normalisation flags forwarded to the M_* blocks.

    forward(x) returns a 6-tuple:
        (sigmoid(cet_out), sigmoid(M_Net_out), sigmoid(ave_out),
         cet_out, M_Net_out, ave_out)
    where cet_out are the CE-Net decoder logits, M_Net_out the M_Net decoder
    logits and ave_out their element-wise average.
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(H_Net_V2, self).__init__()
        # multi-scale simple convolutions applied to the downsampled inputs
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions; each stage consumes the previous stage output
        # concatenated with the matching multi-scale convolution
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # CE-Net encoder: pretrained ResNet-34 backbone
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (not used in forward; kept for state_dict compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of CE-Net
        self.dblock = DACblock(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pool then upsample; adds 4 extra channels
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 516 = 512 + 4 channels contributed by the SPP block
        self.e4_up = nn.ConvTranspose2d(516, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC blocks (one per branch)
        self.CAC = CACblock_with_inception(256)
        self.CAC_Ce = CACblock_with_inception(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolutions of the M_Net decoder
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the side outputs (only side_8 is used in forward)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions matching the M_Net skip channels to the CE-Net decoder
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of CE-Net (516 in: 512 + 4 from the SPP block)
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoding part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        # ---- M_Net encoder part ----
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # F.interpolate replaces the deprecated F.upsample (same behaviour)
        x_2 = F.interpolate(l_x, size=(img_shape // 2, img_shape // 2), mode='bilinear')
        x_3 = F.interpolate(l_x, size=(img_shape // 4, img_shape // 4), mode='bilinear')
        x_4 = F.interpolate(l_x, size=(img_shape // 8, img_shape // 8), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # ---- CE-Net encoder part ----
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)
        e0 = self.firstrelu(e0)
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # ---- center of CE-Net ----
        e4 = self.CAC_Ce(e4)
        e4 = self.spp(e4)  # adds 4 channels -> 516
        # ---- fused center ----
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # ---- M_Net decoder with CE-Net encoder skips ----
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # ---- CE-Net decoder with M_Net encoder skips ----
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)
        ave_out = (cet_out + M_Net_out) / 2
        # torch.sigmoid replaces the deprecated F.sigmoid (same behaviour)
        return (torch.sigmoid(cet_out), torch.sigmoid(M_Net_out),
                torch.sigmoid(ave_out), cet_out, M_Net_out, ave_out)
class H_Net_134(nn.Module):
    """Hybrid M_Net + CE-Net segmentation network, DACblock134 variant.

    Identical topology to the other hybrid networks in this file but the
    center/CAC blocks use ``DACblock134``.

    Args:
        in_ch: number of input image channels.
        out_ch: number of output segmentation channels.
        bn, BatchNorm: normalisation flags forwarded to the M_* blocks.

    forward(x) returns a 6-tuple:
        (sigmoid(cet_out), sigmoid(M_Net_out), sigmoid(ave_out),
         cet_out, M_Net_out, ave_out)
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(H_Net_134, self).__init__()
        # multi-scale simple convolutions applied to the downsampled inputs
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions contain the concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # CE-Net encoder: pretrained ResNet-34 backbone
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (not used in forward; kept for state_dict compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of CE-Net
        self.dblock = DACblock134(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pool then upsample; adds 4 extra channels
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 516 = 512 + 4 channels contributed by the SPP block
        self.e4_up = nn.ConvTranspose2d(516, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC blocks (DACblock134 variant, one per branch)
        self.CAC = DACblock134(256)
        self.CAC_Ce = DACblock134(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolutions of the M_Net decoder
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the side outputs (only side_8 is used in forward)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions matching the M_Net skip channels to the CE-Net decoder
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of CE-Net (516 in: 512 + 4 from the SPP block)
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoding part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        # ---- M_Net encoder part ----
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # F.interpolate replaces the deprecated F.upsample (same behaviour)
        x_2 = F.interpolate(l_x, size=(img_shape // 2, img_shape // 2), mode='bilinear')
        x_3 = F.interpolate(l_x, size=(img_shape // 4, img_shape // 4), mode='bilinear')
        x_4 = F.interpolate(l_x, size=(img_shape // 8, img_shape // 8), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # ---- CE-Net encoder part ----
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)
        e0 = self.firstrelu(e0)
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # ---- center of CE-Net ----
        e4 = self.CAC_Ce(e4)
        e4 = self.spp(e4)  # adds 4 channels -> 516
        # ---- fused center ----
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # ---- M_Net decoder with CE-Net encoder skips ----
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # ---- CE-Net decoder with M_Net encoder skips ----
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)
        ave_out = (cet_out + M_Net_out) / 2
        # torch.sigmoid replaces the deprecated F.sigmoid (same behaviour)
        return (torch.sigmoid(cet_out), torch.sigmoid(M_Net_out),
                torch.sigmoid(ave_out), cet_out, M_Net_out, ave_out)
class H_Net_137(nn.Module):
    """Hybrid M_Net + CE-Net segmentation network, DACblock137 variant.

    Identical topology to the other hybrid networks in this file but the
    center/CAC blocks use ``DACblock137``.

    Args:
        in_ch: number of input image channels.
        out_ch: number of output segmentation channels.
        bn, BatchNorm: normalisation flags forwarded to the M_* blocks.

    forward(x) returns a 6-tuple:
        (sigmoid(cet_out), sigmoid(M_Net_out), sigmoid(ave_out),
         cet_out, M_Net_out, ave_out)
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(H_Net_137, self).__init__()
        # multi-scale simple convolutions applied to the downsampled inputs
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions contain the concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # CE-Net encoder: pretrained ResNet-34 backbone
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (not used in forward; kept for state_dict compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of CE-Net
        self.dblock = DACblock137(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pool then upsample; adds 4 extra channels
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 516 = 512 + 4 channels contributed by the SPP block
        self.e4_up = nn.ConvTranspose2d(516, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC blocks (DACblock137 variant, one per branch)
        self.CAC = DACblock137(256)
        self.CAC_Ce = DACblock137(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolutions of the M_Net decoder
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the side outputs (only side_8 is used in forward)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions matching the M_Net skip channels to the CE-Net decoder
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of CE-Net (516 in: 512 + 4 from the SPP block)
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoding part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        # ---- M_Net encoder part ----
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # F.interpolate replaces the deprecated F.upsample (same behaviour)
        x_2 = F.interpolate(l_x, size=(img_shape // 2, img_shape // 2), mode='bilinear')
        x_3 = F.interpolate(l_x, size=(img_shape // 4, img_shape // 4), mode='bilinear')
        x_4 = F.interpolate(l_x, size=(img_shape // 8, img_shape // 8), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # ---- CE-Net encoder part ----
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)
        e0 = self.firstrelu(e0)
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # ---- center of CE-Net ----
        e4 = self.CAC_Ce(e4)
        e4 = self.spp(e4)  # adds 4 channels -> 516
        # ---- fused center ----
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # ---- M_Net decoder with CE-Net encoder skips ----
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # ---- CE-Net decoder with M_Net encoder skips ----
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)
        ave_out = (cet_out + M_Net_out) / 2
        # torch.sigmoid replaces the deprecated F.sigmoid (same behaviour)
        return (torch.sigmoid(cet_out), torch.sigmoid(M_Net_out),
                torch.sigmoid(ave_out), cet_out, M_Net_out, ave_out)
class H_Net_139(nn.Module):
    """Hybrid M_Net + CE-Net segmentation network, DACblock139 variant.

    Identical topology to the other hybrid networks in this file but the
    center/CAC blocks use ``DACblock139``.

    Args:
        in_ch: number of input image channels.
        out_ch: number of output segmentation channels.
        bn, BatchNorm: normalisation flags forwarded to the M_* blocks.

    forward(x) returns a 6-tuple:
        (sigmoid(cet_out), sigmoid(M_Net_out), sigmoid(ave_out),
         cet_out, M_Net_out, ave_out)
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(H_Net_139, self).__init__()
        # multi-scale simple convolutions applied to the downsampled inputs
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions contain the concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # CE-Net encoder: pretrained ResNet-34 backbone
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (not used in forward; kept for state_dict compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of CE-Net
        self.dblock = DACblock139(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pool then upsample; adds 4 extra channels
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 516 = 512 + 4 channels contributed by the SPP block
        self.e4_up = nn.ConvTranspose2d(516, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC blocks (DACblock139 variant, one per branch)
        self.CAC = DACblock139(256)
        self.CAC_Ce = DACblock139(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolutions of the M_Net decoder
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the side outputs (only side_8 is used in forward)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions matching the M_Net skip channels to the CE-Net decoder
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of CE-Net (516 in: 512 + 4 from the SPP block)
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoding part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        # ---- M_Net encoder part ----
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # F.interpolate replaces the deprecated F.upsample (same behaviour)
        x_2 = F.interpolate(l_x, size=(img_shape // 2, img_shape // 2), mode='bilinear')
        x_3 = F.interpolate(l_x, size=(img_shape // 4, img_shape // 4), mode='bilinear')
        x_4 = F.interpolate(l_x, size=(img_shape // 8, img_shape // 8), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # ---- CE-Net encoder part ----
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)
        e0 = self.firstrelu(e0)
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # ---- center of CE-Net ----
        e4 = self.CAC_Ce(e4)
        e4 = self.spp(e4)  # adds 4 channels -> 516
        # ---- fused center ----
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # ---- M_Net decoder with CE-Net encoder skips ----
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # ---- CE-Net decoder with M_Net encoder skips ----
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)
        ave_out = (cet_out + M_Net_out) / 2
        # torch.sigmoid replaces the deprecated F.sigmoid (same behaviour)
        return (torch.sigmoid(cet_out), torch.sigmoid(M_Net_out),
                torch.sigmoid(ave_out), cet_out, M_Net_out, ave_out)
class H_Net(nn.Module):
    """Hybrid M_Net + CE-Net segmentation network (no SPP in forward).

    Unlike ``H_Net_V2``, this variant applies only the CAC block at the
    CE-Net center (the SPP block is constructed but not used), so the
    center feature keeps 512 channels and ``e4_up``/``decoder4`` take a
    512-channel input.

    Args:
        in_ch: number of input image channels.
        out_ch: number of output segmentation channels.
        bn, BatchNorm: normalisation flags forwarded to the M_* blocks.

    forward(x) returns a 6-tuple:
        (sigmoid(cet_out), sigmoid(M_Net_out), sigmoid(ave_out),
         cet_out, M_Net_out, ave_out)
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(H_Net, self).__init__()
        # multi-scale simple convolutions applied to the downsampled inputs
        self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions contain the concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # CE-Net encoder: pretrained ResNet-34 backbone
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # the center of M_Net (not used in forward; kept for state_dict compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the center of CE-Net (dblock/spp unused in this variant's forward)
        self.dblock = DACblock(512)  # dilated (atrous) convolution block
        self.spp = SPPblock(512)  # pool then upsample
        self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        # 512 in: SPP is not applied in forward, so no extra channels
        self.e4_up = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
        # the CAC blocks (one per branch)
        self.CAC = CACblock_with_inception(256)
        self.CAC_Ce = CACblock_with_inception(512)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolutions of the M_Net decoder
        self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the side outputs (only side_8 is used in forward)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # 1x1 convolutions matching the M_Net skip channels to the CE-Net decoder
        self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
        self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
        self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
        # the decoder of CE-Net (512 in: no SPP channels in this variant)
        self.decoder4 = DecoderBlock(512, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])  # decoding part
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)  # transposed convolution
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)

    def forward(self, x):
        # ---- M_Net encoder part ----
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # F.interpolate replaces the deprecated F.upsample (same behaviour)
        x_2 = F.interpolate(l_x, size=(img_shape // 2, img_shape // 2), mode='bilinear')
        x_3 = F.interpolate(l_x, size=(img_shape // 4, img_shape // 4), mode='bilinear')
        x_4 = F.interpolate(l_x, size=(img_shape // 8, img_shape // 8), mode='bilinear')
        conv1, out1 = self.down1(l_x)  # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1)  # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2)  # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3)  # conv4 [256,64,64]
        # ---- CE-Net encoder part ----
        rx = x
        e0 = self.firstconv(rx)  # [64,256,256]
        e0 = self.firstbn(e0)
        e0 = self.firstrelu(e0)
        pe0 = self.firstmaxpool(e0)  # [64,128,128]
        e1 = self.encoder1(pe0)  # [64,128,128]
        e2 = self.encoder2(e1)  # [128,64,64]
        e3 = self.encoder3(e2)  # [256,32,32]
        e4 = self.encoder4(e3)  # [512,16,16]
        # ---- center of CE-Net (SPP intentionally not applied here) ----
        e4 = self.CAC_Ce(e4)
        # ---- fused center ----
        e4_up = self.e4_up(e4)
        CAC_out = self.CAC(out4)
        CAC_out = e4_up + CAC_out
        cet_out = self.CAC_conv4(CAC_out)
        # ---- M_Net decoder with CE-Net encoder skips ----
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # ---- CE-Net decoder with M_Net encoder skips ----
        d4 = self.decoder4(e4) + out4  # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3)  # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2)  # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1)  # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)
        ave_out = (cet_out + M_Net_out) / 2
        # torch.sigmoid replaces the deprecated F.sigmoid (same behaviour)
        return (torch.sigmoid(cet_out), torch.sigmoid(M_Net_out),
                torch.sigmoid(ave_out), cet_out, M_Net_out, ave_out)
class H_Net_ori(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
super(H_Net_ori, self).__init__()
# mutli-scale simple convolution
self.conv2 = M_Conv(3, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
self.conv3 = M_Conv(3, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
self.conv4 = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
# the down convolution contain concat operation
self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
# ce_net encoder part
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=True)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# the center of M_Net
self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
# the center of ce_Net
self.dblock = DACblock(512) # 空洞卷积
self.spp = SPPblock(512) # 池化后再上采样
# self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.rc_up0 = nn.ConvTranspose2d(512, 512, 2, stride=2)
self.e3_up = nn.ConvTranspose2d(256, 256, 2, stride=2)
self.e2_up = nn.ConvTranspose2d(128, 128, 2, stride=2)
self.rc_up1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
self.rc_up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
self.rc_up3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
self.rc_up4 = nn.ConvTranspose2d(128, 32, 2, stride=2)
# the CAC block
self.CAC = CACblock_with_inception(256)
self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
# the up convolution contain concat operation
self.up5 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
self.up6 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
self.up7 = M_Decoder_my_10(64, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
self.up8 = M_Decoder_my_10(32, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
# the sideoutput
self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
#
self.d3_conv = nn.Conv2d(384, 128, kernel_size=1, padding=0, stride=1, bias=True)
self.d2_conv = nn.Conv2d(192, 64, kernel_size=1, padding=0, stride=1, bias=True)
self.d1_conv = nn.Conv2d(96, 64, kernel_size=1, padding=0, stride=1, bias=True)
# the decoder of ce_net
self.decoder4 = DecoderBlock(512, filters[2])
self.decoder3 = DecoderBlock(filters[2], filters[1])
self.decoder2 = DecoderBlock(filters[1], filters[0])
self.decoder1 = DecoderBlock(filters[0], filters[0]) # 解码部分
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) # 逆卷积
self.finalrelu1 = nonlinearity
self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
self.finalrelu2 = nonlinearity
self.finalconv3 = nn.Conv2d(32, out_ch, 3, padding=1)
    def forward(self, x):
        """Dual-branch forward pass: an M_Net-style multi-scale encoder/decoder
        and a CE_Net-style (ResNet) encoder/decoder run side by side; the two
        branch predictions are averaged and passed through a sigmoid.

        NOTE(review): the spatial size is read from dim 2 only and reused for
        both axes, so the input is assumed square (side divisible by 16) --
        confirm against callers.
        """
        # M_Net Encoder Part
        l_x = x
        _, _, img_shape, _ = l_x.size()
        # Downscaled copies of the input for the multi-scale side convolutions.
        # (F.upsample is deprecated; F.interpolate is the modern equivalent.)
        x_2 = F.upsample(l_x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(l_x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(l_x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out1 = self.down1(l_x) # conv1 [32,512,512]
        out1 = torch.cat([self.conv2(x_2), out1], dim=1)
        conv2, out2 = self.down2(out1) # conv2 [64,256,256]
        out2 = torch.cat([self.conv3(x_3), out2], dim=1)
        conv3, out3 = self.down3(out2) # conv3 [128,128,128]
        out3 = torch.cat([self.conv4(x_4), out3], dim=1)
        conv4, out4 = self.down4(out3) # conv4 [256,64,64]
        # cet_out = self.center(out)
        # CE_Net Encoder part (pretrained ResNet-style stem + 4 stages).
        rx = x
        e0 = self.firstconv(rx) #[64,256,256]
        e0 = self.firstbn(e0) #[64,256,256]
        e0 = self.firstrelu(e0) #[64,256,256]
        pe0 = self.firstmaxpool(e0) #[64,128,128]
        e1 = self.encoder1(pe0) #[64,128,128]
        e2 = self.encoder2(e1) #[128,64,64]
        e3 = self.encoder3(e2) #[256,32,32]
        e4 = self.encoder4(e3) #[512,16,16]
        # the center part
        CAC_out = self.CAC(out4)
        cet_out = self.CAC_conv4(CAC_out)
        # M_Net decoder: each step fuses a CE_Net encoder feature (e3..e0)
        # with the current decoder feature, then upsamples 2x.
        r1_cat = torch.cat([e3, cet_out], dim=1)
        up_out = self.rc_up1(r1_cat)
        up5 = self.up5(up_out)
        r2_cat = torch.cat([e2, up5], dim=1)
        up_out1 = self.rc_up2(r2_cat)
        up6 = self.up6(up_out1)
        r3_cat = torch.cat([e1, up6], dim=1)
        up_out2 = self.rc_up3(r3_cat)
        up7 = self.up7(up_out2)
        r4_cat = torch.cat([e0, up7], dim=1)
        up_out3 = self.rc_up4(r4_cat)
        up8 = self.up8(up_out3)
        M_Net_out = self.side_8(up8)
        # center of CE_Net
        e4 = self.dblock(e4)
        e4 = self.spp(e4)
        # Decoder of CE_Net (DecoderBlocks fused with M_Net encoder skips;
        # the original comment said "Encoder" but these are decoders).
        d4 = self.decoder4(e4) + out4 # [256,32,32]
        d3 = self.decoder3(d4) + self.d3_conv(out3) # [128,64,64]
        d2 = self.decoder2(d3) + self.d2_conv(out2) # [64,128,128]
        d1 = self.decoder1(d2) + self.d1_conv(out1) # [64,256,256]
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        cet_out = self.finalconv3(out)  # NOTE: rebinds cet_out; the earlier CAC output is no longer needed
        ave_out = (cet_out + M_Net_out)/2
        # F.sigmoid is deprecated; torch.sigmoid is the modern spelling.
        return F.sigmoid(ave_out)
class DecoderBlock(nn.Module):
    """CE-Net style decoder stage: 1x1 channel bottleneck, 3x3 transposed
    conv for 2x spatial upsampling, then 1x1 expansion to ``n_filters``.

    Each conv is followed by batch norm and the module-level
    ``nonlinearity`` activation.
    """

    def __init__(self, in_channels, n_filters):
        super(DecoderBlock, self).__init__()
        mid = in_channels // 4
        # Channel-reducing bottleneck.
        self.conv1 = nn.Conv2d(in_channels, mid, 1)
        self.norm1 = nn.BatchNorm2d(mid)
        self.relu1 = nonlinearity
        # Learned 2x spatial upsampling.
        self.deconv2 = nn.ConvTranspose2d(mid, mid, 3, stride=2, padding=1, output_padding=1)
        self.norm2 = nn.BatchNorm2d(mid)
        self.relu2 = nonlinearity
        # Expand back to the requested output width.
        self.conv3 = nn.Conv2d(mid, n_filters, 1)
        self.norm3 = nn.BatchNorm2d(n_filters)
        self.relu3 = nonlinearity

    def forward(self, x):
        out = self.relu1(self.norm1(self.conv1(x)))
        out = self.relu2(self.norm2(self.deconv2(out)))
        return self.relu3(self.norm3(self.conv3(out)))
class SPPblock(nn.Module):
    """Spatial pyramid pooling block.

    The input is max-pooled at four scales (stride 2/3/5/6), each pooled map
    is projected to a single channel by a shared 1x1 convolution, upsampled
    back to the input resolution, and concatenated with the input.

    Output: ``(N, in_channels + 4, H, W)`` for an ``(N, in_channels, H, W)``
    input.
    """

    def __init__(self, in_channels):
        super(SPPblock, self).__init__()
        self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)
        self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)
        self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)
        # Shared single-channel projection applied to every pooled scale.
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0)

    def forward(self, x):
        # Fix: the original stored every intermediate activation on ``self``
        # (self.layer1..self.layer4, self.in_channels), which keeps the
        # tensors alive after forward() returns and is unsafe when the module
        # is replicated (e.g. DataParallel). Use locals instead.
        h, w = x.size(2), x.size(3)
        layers = [
            F.upsample(self.conv(pool(x)), size=(h, w), mode='bilinear')
            for pool in (self.pool1, self.pool2, self.pool3, self.pool4)
        ]
        return torch.cat(layers + [x], 1)
class DACblock(nn.Module):
    """Dense atrous convolution block with dilation rates 1, 3 and 5.

    Four branches with increasing receptive field are added to the input as
    a residual; activations use the module-level ``nonlinearity``.
    """

    def __init__(self, channel):
        super(DACblock, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=5, padding=5)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Zero-initialise every convolution bias in the block.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        d1 = self.dilate1(x)
        d12 = self.dilate2(d1)  # shared by branches 3 and 4
        branch1 = nonlinearity(d1)
        branch2 = nonlinearity(self.conv1x1(self.dilate2(x)))
        branch3 = nonlinearity(self.conv1x1(d12))
        branch4 = nonlinearity(self.conv1x1(self.dilate3(d12)))
        return x + branch1 + branch2 + branch3 + branch4
class DACblock134(nn.Module):
    """Dense atrous convolution block variant with dilation rates 1, 3 and 4.

    Identical structure to ``DACblock`` except the deepest dilation is 4.
    """

    def __init__(self, channel):
        super(DACblock134, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=4, padding=4)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Zero-initialise every convolution bias in the block.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        d1 = self.dilate1(x)
        d12 = self.dilate2(d1)  # shared by branches 3 and 4
        branch1 = nonlinearity(d1)
        branch2 = nonlinearity(self.conv1x1(self.dilate2(x)))
        branch3 = nonlinearity(self.conv1x1(d12))
        branch4 = nonlinearity(self.conv1x1(self.dilate3(d12)))
        return x + branch1 + branch2 + branch3 + branch4
class DACblock137(nn.Module):
    """Dense atrous convolution block variant with dilation rates 1, 3 and 7.

    Identical structure to ``DACblock`` except the deepest dilation is 7.
    """

    def __init__(self, channel):
        super(DACblock137, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=7, padding=7)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Zero-initialise every convolution bias in the block.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        d1 = self.dilate1(x)
        d12 = self.dilate2(d1)  # shared by branches 3 and 4
        branch1 = nonlinearity(d1)
        branch2 = nonlinearity(self.conv1x1(self.dilate2(x)))
        branch3 = nonlinearity(self.conv1x1(d12))
        branch4 = nonlinearity(self.conv1x1(self.dilate3(d12)))
        return x + branch1 + branch2 + branch3 + branch4
class DACblock139(nn.Module):
    """Dense atrous convolution block variant with dilation rates 1, 3 and 9.

    Identical structure to ``DACblock`` except the deepest dilation is 9.
    """

    def __init__(self, channel):
        super(DACblock139, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=9, padding=9)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Zero-initialise every convolution bias in the block.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        d1 = self.dilate1(x)
        d12 = self.dilate2(d1)  # shared by branches 3 and 4
        branch1 = nonlinearity(d1)
        branch2 = nonlinearity(self.conv1x1(self.dilate2(x)))
        branch3 = nonlinearity(self.conv1x1(d12))
        branch4 = nonlinearity(self.conv1x1(self.dilate3(d12)))
        return x + branch1 + branch2 + branch3 + branch4
class M_Net(nn.Module):
    """Multi-scale M-Net: U-shaped encoder/decoder with image-pyramid side
    inputs and four deeply-supervised side outputs.

    ``forward`` returns ``[ave_out, side_5, side_6, side_7, side_8]``: four
    side outputs at full input resolution (``out_ch`` channels each, no
    activation) plus their mean.
    """
    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net, self).__init__()
        # mutli-scale simple convolution
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
        # the center
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # NOTE(review): despite the "down" names these are stride-2 transposed
        # convolutions, i.e. 2x upsampling layers for the decoder.
        self.rc_down1 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput (1x1 prediction heads, one per decoder stage)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # self.gf = FastGuidedFilter_attention(r=2, eps=1e-2)
        #
        # # attention blocks
        # self.attentionblock5 = GridAttentionBlock(in_channels=512)
        # self.attentionblock6 = GridAttentionBlock(in_channels=256)
        # self.attentionblock7 = GridAttentionBlock(in_channels=128)
        # self.attentionblock8 = GridAttentionBlock(in_channels=64)
    def forward(self, x):
        """Encode at four scales, decode with skip connections, and return the
        averaged plus individual side outputs.

        NOTE(review): the spatial size is read from dim 2 only, so the input
        is assumed square (side divisible by 8) -- confirm against callers.
        """
        _, _, img_shape, _ = x.size()
        # Downscaled input pyramid for the multi-scale side convolutions.
        x_2 = F.upsample(x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        cet_out = self.center(out)
        # Decoder: upsample, concatenate the matching encoder skip, refine.
        up_out = self.rc_down1(cet_out)
        r1_cat = torch.cat([conv4, up_out], dim=1)
        up5 = self.up5(r1_cat)
        up_out = self.rc_down2(up5)
        r2_cat = torch.cat([conv3,up_out],dim = 1)
        up6 = self.up6(r2_cat)
        up_out = self.rc_down3(up6)
        r3_cat = torch.cat([conv2,up_out],dim = 1)
        up7 = self.up7(r3_cat)
        up_out = self.rc_down4(up7)
        r4_cat = torch.cat([conv1,up_out],dim = 1)
        up8 = self.up8(r4_cat)
        # Bring every decoder stage back to the input resolution before the
        # 1x1 side-output heads, then average for the fused prediction.
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class M_Net_CAC_with_CAM(nn.Module):
    """M-Net variant whose bottleneck concatenates a CAC (context aggregation)
    block output with a pooled-and-convolved CAM (class activation map) input.

    ``forward(x, cam_x)`` returns ``[ave_out, side_5, side_6, side_7,
    side_8]``: four side outputs at full input resolution plus their mean.
    ``CAM_conv`` is M_Conv(1, ...), so ``cam_x`` is expected to be a
    single-channel map -- TODO confirm against callers.
    """
    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net_CAC_with_CAM, self).__init__()
        # mutli-scale simple convolution
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
        # the center (unused by forward(); kept for checkpoint compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # NOTE(review): despite the "down" names these are stride-2 transposed
        # convolutions, i.e. 2x upsampling layers for the decoder.
        self.rc_down1 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # the CAC block (512 -> 256 after fusing with the CAM features)
        self.CAC = CACblock_with_inception(256)
        self.CAC_conv4 = M_Conv(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the CAM block: 16x max-pool to match the deepest encoder features
        # (assumes each M_Encoder stage halves resolution -- TODO confirm).
        self.p_CAM = nn.MaxPool2d(16)
        # self.CAM_conv = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.CAM_conv = M_Conv(1, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput (1x1 prediction heads, one per decoder stage)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
    def forward(self, x,cam_x):
        """Encode ``x`` at four scales, fuse CAC features with the pooled CAM,
        then decode with skip connections.

        NOTE(review): the spatial size is read from dim 2 only, so inputs are
        assumed square (side divisible by 16) -- confirm against callers.
        """
        _, _, img_shape, _ = x.size()
        # Downscaled input pyramid for the multi-scale side convolutions.
        x_2 = F.upsample(x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        # cet_out = self.center(out)
        CAC_out = self.CAC(out)
        # the CAC block concat with our CAM
        cam = self.p_CAM(cam_x)
        cam_conv = self.CAM_conv(cam)
        cam_cac = torch.cat([CAC_out,cam_conv],dim=1)
        cet_out = self.CAC_conv4(cam_cac)
        # Decoder: upsample, concatenate the matching encoder skip, refine.
        up_out = self.rc_down1(cet_out)
        r1_cat = torch.cat([conv4, up_out], dim=1)
        up5 = self.up5(r1_cat)
        up_out = self.rc_down2(up5)
        r2_cat = torch.cat([conv3,up_out],dim = 1)
        up6 = self.up6(r2_cat)
        up_out = self.rc_down3(up6)
        r3_cat = torch.cat([conv2,up_out],dim = 1)
        up7 = self.up7(r3_cat)
        up_out = self.rc_down4(up7)
        r4_cat = torch.cat([conv1,up_out],dim = 1)
        up8 = self.up8(r4_cat)
        # Side outputs at full resolution, then their average.
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class M_Net_CAC_with_CAM_Slide(nn.Module):
    """Variant of ``M_Net_CAC_with_CAM`` whose multi-scale side inputs are
    built from the (single-channel) ``cam_x`` map instead of the image.

    ``forward(x, cam_x)`` returns ``[ave_out, side_5, side_6, side_7,
    side_8]``. ``conv2/3/4`` and ``CAM_conv`` are M_Conv(1, ...), so
    ``cam_x`` is expected to be single-channel -- TODO confirm.
    """
    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net_CAC_with_CAM_Slide, self).__init__()
        # mutli-scale simple convolution (applied to cam_x, hence 1 input channel)
        self.conv2 = M_Conv(1, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(1, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(1, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
        # the center (unused by forward(); kept for checkpoint compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # NOTE(review): despite the "down" names these are stride-2 transposed
        # convolutions, i.e. 2x upsampling layers for the decoder.
        self.rc_down1 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # the CAC block (512 -> 256 after fusing with the CAM features)
        self.CAC = CACblock_with_inception(256)
        self.CAC_conv4 = M_Conv(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the CAM block: 16x max-pool to match the deepest encoder features
        # (assumes each M_Encoder stage halves resolution -- TODO confirm).
        self.p_CAM = nn.MaxPool2d(16)
        # self.CAM_conv = M_Conv(3, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.CAM_conv = M_Conv(1, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput (1x1 prediction heads, one per decoder stage)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
    def forward(self, x,cam_x):
        """Encode ``x`` with cam-derived side inputs, fuse CAC output with the
        pooled CAM, then decode with skip connections.

        NOTE(review): the spatial size is read from dim 2 of ``x`` only, so
        ``x`` and ``cam_x`` are assumed square and same-sized -- confirm.
        """
        _, _, img_shape, _ = x.size()
        # Downscaled pyramid built from cam_x (not from x, unlike M_Net_CAC).
        x_2 = F.upsample(cam_x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(cam_x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(cam_x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        # cet_out = self.center(out)
        CAC_out = self.CAC(out)
        # the CAC block concat with our CAM
        cam = self.p_CAM(cam_x)
        cam_conv = self.CAM_conv(cam)
        cam_cac = torch.cat([CAC_out,cam_conv],dim=1)
        cet_out = self.CAC_conv4(cam_cac)
        # Decoder: upsample, concatenate the matching encoder skip, refine.
        up_out = self.rc_down1(cet_out)
        r1_cat = torch.cat([conv4, up_out], dim=1)
        up5 = self.up5(r1_cat)
        up_out = self.rc_down2(up5)
        r2_cat = torch.cat([conv3,up_out],dim = 1)
        up6 = self.up6(r2_cat)
        up_out = self.rc_down3(up6)
        r3_cat = torch.cat([conv2,up_out],dim = 1)
        up7 = self.up7(r3_cat)
        up_out = self.rc_down4(up7)
        r4_cat = torch.cat([conv1,up_out],dim = 1)
        up8 = self.up8(r4_cat)
        # Side outputs at full resolution, then their average.
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class M_Net_CAC_slide(nn.Module):
    """M-Net + CAC bottleneck variant whose multi-scale side inputs come from
    a separate (single-channel) ``cam_x`` map.

    ``forward(x, cam_x)`` returns ``[ave_out, side_5, side_6, side_7,
    side_8]``. ``conv2/3/4`` are M_Conv(1, ...), so ``cam_x`` is expected
    to be single-channel -- TODO confirm against callers.
    """
    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net_CAC_slide, self).__init__()
        # mutli-scale simple convolution (applied to cam_x, hence 1 input channel)
        self.conv2 = M_Conv(1, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(1, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(1, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
        # the center (unused by forward(); kept for checkpoint compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # NOTE(review): despite the "down" names these are stride-2 transposed
        # convolutions, i.e. 2x upsampling layers for the decoder.
        self.rc_down1 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # the CAC block
        self.CAC = CACblock_with_inception(256)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput (1x1 prediction heads, one per decoder stage)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # self.gf = FastGuidedFilter_attention(r=2, eps=1e-2)
        #
        # # attention blocks
        # self.attentionblock5 = GridAttentionBlock(in_channels=512)
        # self.attentionblock6 = GridAttentionBlock(in_channels=256)
        # self.attentionblock7 = GridAttentionBlock(in_channels=128)
        # self.attentionblock8 = GridAttentionBlock(in_channels=64)
    def forward(self, x,cam_x):
        """Encode ``x`` with cam-derived side inputs, pass the deepest feature
        through the CAC bottleneck, then decode with skip connections.

        NOTE(review): the spatial size is read from dim 2 of ``x`` only, so
        ``x`` and ``cam_x`` are assumed square and same-sized -- confirm.
        """
        _, _, img_shape, _ = x.size()
        # Downscaled pyramid built from cam_x (not from x, unlike M_Net_CAC).
        x_2 = F.upsample(cam_x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(cam_x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(cam_x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        # cet_out = self.center(out)
        CAC_out = self.CAC(out)
        cet_out = self.CAC_conv4(CAC_out)
        # Decoder: upsample, concatenate the matching encoder skip, refine.
        up_out = self.rc_down1(cet_out)
        r1_cat = torch.cat([conv4, up_out], dim=1)
        up5 = self.up5(r1_cat)
        up_out = self.rc_down2(up5)
        r2_cat = torch.cat([conv3,up_out],dim = 1)
        up6 = self.up6(r2_cat)
        up_out = self.rc_down3(up6)
        r3_cat = torch.cat([conv2,up_out],dim = 1)
        up7 = self.up7(r3_cat)
        up_out = self.rc_down4(up7)
        r4_cat = torch.cat([conv1,up_out],dim = 1)
        up8 = self.up8(r4_cat)
        # Side outputs at full resolution, then their average.
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class M_Net_CAC(nn.Module):
    """M-Net variant whose bottleneck is a CAC (context aggregation) block
    instead of the plain center encoder.

    ``forward`` returns ``[ave_out, side_5, side_6, side_7, side_8]``: four
    deeply-supervised side outputs at the input's resolution plus their mean.
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net_CAC, self).__init__()
        # Side convolutions applied to the downscaled image pyramid.
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # Encoder stages; each consumes the previous output concatenated with
        # the matching pyramid convolution.
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # Unused by forward(); kept for checkpoint compatibility.
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # Stride-2 transposed convs: 2x upsampling despite the "down" names.
        self.rc_down1 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # Context-aggregation bottleneck.
        self.CAC = CACblock_with_inception(256)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # Decoder stages with skip concatenation.
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # 1x1 heads producing one side output per decoder stage.
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)

    def forward(self, x):
        """Run the multi-scale encoder, CAC bottleneck and supervised decoder.

        NOTE(review): the spatial size is read from dim 2 only, so the input
        is assumed square (side divisible by 8) -- confirm against callers.
        """
        _, _, size, _ = x.size()
        half = F.upsample(x, size=(int(size / 2), int(size / 2)), mode='bilinear')
        quarter = F.upsample(x, size=(int(size / 4), int(size / 4)), mode='bilinear')
        eighth = F.upsample(x, size=(int(size / 8), int(size / 8)), mode='bilinear')
        # Encoder: each stage yields a skip feature and a pooled feature.
        skip1, feat = self.down1(x)
        feat = torch.cat([self.conv2(half), feat], dim=1)
        skip2, feat = self.down2(feat)
        feat = torch.cat([self.conv3(quarter), feat], dim=1)
        skip3, feat = self.down3(feat)
        feat = torch.cat([self.conv4(eighth), feat], dim=1)
        skip4, feat = self.down4(feat)
        # Bottleneck.
        bottleneck = self.CAC_conv4(self.CAC(feat))
        # Decoder: upsample, concatenate the matching skip, refine.
        up5 = self.up5(torch.cat([skip4, self.rc_down1(bottleneck)], dim=1))
        up6 = self.up6(torch.cat([skip3, self.rc_down2(up5)], dim=1))
        up7 = self.up7(torch.cat([skip2, self.rc_down3(up6)], dim=1))
        up8 = self.up8(torch.cat([skip1, self.rc_down4(up7)], dim=1))
        # Side outputs at the input resolution.
        heads = ((up5, self.side_5), (up6, self.side_6), (up7, self.side_7), (up8, self.side_8))
        sides = [head(F.upsample(fm, size=(size, size), mode='bilinear')) for fm, head in heads]
        ave_out = (sides[0] + sides[1] + sides[2] + sides[3]) / 4
        return [ave_out, sides[0], sides[1], sides[2], sides[3]]
class M_Net_CAC_with_Coarse(nn.Module):
    """M-Net variant with a CAC-with-inception-blocks bottleneck.

    ``forward(x, cam)`` returns ``[ave_out, side_5, side_6, side_7,
    side_8]``. NOTE(review): the ``cam`` argument is accepted but never
    used anywhere in forward() -- confirm whether that is intentional.
    """
    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(M_Net_CAC_with_Coarse, self).__init__()
        # mutli-scale simple convolution
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolution contain concat operation
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm) # 64
        # the center (unused by forward(); kept for checkpoint compatibility)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # self.rw_up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # NOTE(review): despite the "down" names these are stride-2 transposed
        # convolutions, i.e. 2x upsampling layers for the decoder.
        self.rc_down1 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.rc_down2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.rc_down3 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.rc_down4 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        # the CAC block (inception-blocks variant, unlike M_Net_CAC)
        self.CAC = CACblock_with_inception_blocks(256)
        self.CAC_conv4 = M_Conv(256, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the up convolution contain concat operation
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the sideoutput (1x1 prediction heads, one per decoder stage)
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # self.gf = FastGuidedFilter_attention(r=2, eps=1e-2)
        #
        # # attention blocks
        # self.attentionblock5 = GridAttentionBlock(in_channels=512)
        # self.attentionblock6 = GridAttentionBlock(in_channels=256)
        # self.attentionblock7 = GridAttentionBlock(in_channels=128)
        # self.attentionblock8 = GridAttentionBlock(in_channels=64)
    def forward(self, x,cam):
        """Encode at four scales, apply the CAC bottleneck, decode with skips.

        NOTE(review): ``cam`` is unused; the spatial size is read from dim 2
        only, so the input is assumed square (side divisible by 8) -- confirm.
        """
        _, _, img_shape, _ = x.size()
        # Downscaled input pyramid for the multi-scale side convolutions.
        x_2 = F.upsample(x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        # cet_out = self.center(out)
        CAC_out = self.CAC(out)
        cet_out = self.CAC_conv4(CAC_out)
        # Decoder: upsample, concatenate the matching encoder skip, refine.
        up_out = self.rc_down1(cet_out)
        r1_cat = torch.cat([conv4, up_out], dim=1)
        up5 = self.up5(r1_cat)
        up_out = self.rc_down2(up5)
        r2_cat = torch.cat([conv3,up_out],dim = 1)
        up6 = self.up6(r2_cat)
        up_out = self.rc_down3(up6)
        r3_cat = torch.cat([conv2,up_out],dim = 1)
        up7 = self.up7(r3_cat)
        up_out = self.rc_down4(up7)
        r4_cat = torch.cat([conv1,up_out],dim = 1)
        up8 = self.up8(r4_cat)
        # Side outputs at full resolution, then their average.
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class AG_Net_EASPP(nn.Module):
    """AG-Net variant that inserts a CAC (context aggregation) block after the
    encoder center.

    Multi-scale U-shaped network: the input is downsampled to 1/2, 1/4 and 1/8
    resolution, projected with M_Conv and concatenated into the encoder path.
    Before each decoder stage the features are refined with an attention-
    weighted fast guided filter (``self.gf``) driven by a GridAttentionBlock.

    ``forward`` returns ``[ave_out, side_5, side_6, side_7, side_8]`` where the
    side outputs are per-scale predictions upsampled to the input size and
    ``ave_out`` is their element-wise mean.

    NOTE(review): ``forward`` uses only ``x.size(2)`` to size the pyramid, so
    it assumes a square input (height == width) — confirm with callers.
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(AG_Net_EASPP, self).__init__()
        # multi-scale simple convolutions: project the downsampled inputs so
        # they can be concatenated with the encoder features
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions; inputs contain the concatenated multi-scale
        # features, hence the summed channel counts
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # the center (no pooling)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the CAC block applied to the center features
        # self.CAC = CACblock_with_inception_blocks(512)
        self.CAC = CACblock_with_inception(512)
        # the decoder stages (M_Decoder_my_10 only applies its conv stack;
        # the upsample/merge happens via the guided filter below)
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # 1x1 side-output heads, one per decoder scale
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # attention-weighted fast guided filter shared by all decoder stages
        self.gf = FastGuidedFilter_attention(r=2, eps=1e-2)
        # attention blocks; in_channels matches the concatenated guidance map
        self.attentionblock5 = GridAttentionBlock(in_channels=512)
        self.attentionblock6 = GridAttentionBlock(in_channels=256)
        self.attentionblock7 = GridAttentionBlock(in_channels=128)
        self.attentionblock8 = GridAttentionBlock(in_channels=64)

    def forward(self, x):
        # build the input pyramid from the (assumed square) spatial size
        _, _, img_shape, _ = x.size()
        x_2 = F.upsample(x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        # encoder: each stage consumes the previous pooled features
        # concatenated with the matching-scale input projection
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        out = self.center(out)
        CAC_out = self.CAC(out)
        # decoder stage 5: guidance FG = input projection + skip features;
        # the guided filter transfers CAC_out onto the larger guide FG,
        # weighted by the attention computed on the half-size guide
        FG = torch.cat([self.conv4(x_4), conv4], dim=1)
        N, C, H, W= FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, CAC_out, FG,self.attentionblock5(FG_small,out))
        up5 = self.up5(out)
        # decoder stage 6
        FG = torch.cat([self.conv3(x_3), conv3], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up5, FG,self.attentionblock6(FG_small,up5))
        up6 = self.up6(out)
        # decoder stage 7
        FG = torch.cat([self.conv2(x_2), conv2], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up6, FG,self.attentionblock7(FG_small,up6))
        up7 = self.up7(out)
        # decoder stage 8: conv1 is duplicated to reach the 64 channels
        # expected by attentionblock8 — NOTE(review): looks intentional but
        # differs from the other stages (no input projection); confirm
        FG = torch.cat([conv1, conv1], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up7, FG,self.attentionblock8(FG_small,up7))
        up8 = self.up8(out)
        # side outputs: upsample each decoder scale to input size, then
        # apply the 1x1 prediction head
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class AG_Net(nn.Module):
    """Attention Guided Network: multi-scale U-shaped encoder/decoder with an
    attention-weighted fast guided filter between scales.

    Identical in structure to the EASPP variant elsewhere in this file except
    that the encoder-center features feed the first guided-filter stage
    directly (no context-aggregation block).

    ``forward`` returns ``[ave_out, side_5, side_6, side_7, side_8]``: four
    per-scale side predictions upsampled to the input size plus their mean.

    NOTE(review): ``forward`` uses only ``x.size(2)`` to size the pyramid, so
    it assumes a square input — confirm with callers.
    """

    def __init__(self, in_ch, out_ch, bn=True, BatchNorm=False):
        super(AG_Net, self).__init__()
        # multi-scale simple convolutions for the downsampled inputs
        self.conv2 = M_Conv(in_ch, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv3 = M_Conv(in_ch, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.conv4 = M_Conv(in_ch, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # the down convolutions; inputs contain concatenated multi-scale
        # features, hence the summed channel counts
        self.down1 = M_Encoder(in_ch, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 512
        self.down2 = M_Encoder(64 + 32, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 256
        self.down3 = M_Encoder(128 + 64, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 128
        self.down4 = M_Encoder(256 + 128, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)  # 64
        # the center (no pooling)
        self.center = M_Encoder(256, 512, kernel_size=3, pooling=False)
        # the decoder stages
        self.up5 = M_Decoder_my_10(512, 256, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up6 = M_Decoder_my_10(256, 128, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up7 = M_Decoder_my_10(128, 64, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        self.up8 = M_Decoder_my_10(64, 32, kernel_size=3, bn=bn, BatchNorm=BatchNorm)
        # 1x1 side-output heads, one per decoder scale
        self.side_5 = nn.Conv2d(256, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_6 = nn.Conv2d(128, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_7 = nn.Conv2d(64, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        self.side_8 = nn.Conv2d(32, out_ch, kernel_size=1, padding=0, stride=1, bias=True)
        # attention-weighted fast guided filter shared by all decoder stages
        self.gf = FastGuidedFilter_attention(r=2, eps=1e-2)
        # attention blocks; in_channels matches the concatenated guidance map
        self.attentionblock5 = GridAttentionBlock(in_channels=512)
        self.attentionblock6 = GridAttentionBlock(in_channels=256)
        self.attentionblock7 = GridAttentionBlock(in_channels=128)
        self.attentionblock8 = GridAttentionBlock(in_channels=64)

    def forward(self, x):
        # build the input pyramid from the (assumed square) spatial size
        _, _, img_shape, _ = x.size()
        x_2 = F.upsample(x, size=(int(img_shape / 2), int(img_shape / 2)), mode='bilinear')
        x_3 = F.upsample(x, size=(int(img_shape / 4), int(img_shape / 4)), mode='bilinear')
        x_4 = F.upsample(x, size=(int(img_shape / 8), int(img_shape / 8)), mode='bilinear')
        # encoder path with multi-scale concatenation
        conv1, out = self.down1(x)
        out = torch.cat([self.conv2(x_2), out], dim=1)
        conv2, out = self.down2(out)
        out = torch.cat([self.conv3(x_3), out], dim=1)
        conv3, out = self.down3(out)
        out = torch.cat([self.conv4(x_4), out], dim=1)
        conv4, out = self.down4(out)
        out = self.center(out)
        # decoder stage 5: center features are filtered onto the guidance
        # map FG (input projection + skip), weighted by attention
        FG = torch.cat([self.conv4(x_4), conv4], dim=1)
        N, C, H, W= FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, out, FG,self.attentionblock5(FG_small,out))
        up5 = self.up5(out)
        # decoder stage 6
        FG = torch.cat([self.conv3(x_3), conv3], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up5, FG,self.attentionblock6(FG_small,up5))
        up6 = self.up6(out)
        # decoder stage 7
        FG = torch.cat([self.conv2(x_2), conv2], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up6, FG,self.attentionblock7(FG_small,up6))
        up7 = self.up7(out)
        # decoder stage 8: conv1 duplicated to reach the 64 channels expected
        # by attentionblock8 — NOTE(review): differs from the other stages
        # (no input projection); confirm this is intentional
        FG = torch.cat([conv1, conv1], dim=1)
        N, C, H, W = FG.size()
        FG_small = F.upsample(FG, size=(int(H/2), int(W/2)), mode='bilinear')
        out = self.gf(FG_small, up7, FG,self.attentionblock8(FG_small,up7))
        up8 = self.up8(out)
        # side outputs at input resolution, then 1x1 prediction heads
        side_5 = F.upsample(up5, size=(img_shape, img_shape), mode='bilinear')
        side_6 = F.upsample(up6, size=(img_shape, img_shape), mode='bilinear')
        side_7 = F.upsample(up7, size=(img_shape, img_shape), mode='bilinear')
        side_8 = F.upsample(up8, size=(img_shape, img_shape), mode='bilinear')
        side_5 = self.side_5(side_5)
        side_6 = self.side_6(side_6)
        side_7 = self.side_7(side_7)
        side_8 = self.side_8(side_8)
        ave_out = (side_5+side_6+side_7+side_8)/4
        return [ave_out, side_5, side_6, side_7, side_8]
class CACblock_with_inception_blocks(nn.Module):
    """Inception-style context block: parallel 1x1/3x3/5x5/7x7 branches on a
    1x1-reduced input, plus a max-pool branch on the raw input, summed."""

    def __init__(self, channel):
        super(CACblock_with_inception_blocks, self).__init__()
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        self.conv3x3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.conv5x5 = nn.Conv2d(channel, channel, kernel_size=5, dilation=1, padding=2)
        self.conv7x7 = nn.Conv2d(channel, channel, kernel_size=7, dilation=1, padding=3)
        self.pooling = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # zero-initialize every conv bias in this block
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, x):
        # the 1x1 projection feeds all conv branches (same module, same input)
        reduced = self.conv1x1(x)
        branch_1 = nonlinearity(reduced)
        branch_3 = nonlinearity(self.conv3x3(reduced))
        branch_5 = nonlinearity(self.conv5x5(reduced))
        branch_7 = nonlinearity(self.conv7x7(reduced))
        pooled = self.pooling(x)
        return branch_1 + branch_3 + branch_5 + branch_7 + pooled
class CACblock_with_inception(nn.Module):
    """Reduced inception-style context block (1x1, 3x3, 5x5 conv branches on a
    1x1-reduced input plus a max-pool branch on the raw input, summed)."""

    def __init__(self, channel):
        super(CACblock_with_inception, self).__init__()
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        self.conv3x3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.conv5x5 = nn.Conv2d(channel, channel, kernel_size=5, dilation=1, padding=2)
        # self.conv7x7 = nn.Conv2d(channel, channel, kernel_size=7, dilation=1, padding=3)
        self.pooling = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # zero-initialize every conv bias in this block
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, x):
        # the 1x1 projection feeds all conv branches (same module, same input)
        reduced = self.conv1x1(x)
        branch_1 = nonlinearity(reduced)
        branch_3 = nonlinearity(self.conv3x3(reduced))
        branch_5 = nonlinearity(self.conv5x5(reduced))
        pooled = self.pooling(x)
        return branch_1 + branch_3 + branch_5 + pooled
class ConvBnRelu2d(nn.Module):
    """Bias-free Conv2d optionally followed by BatchNorm (``BatchNorm=True``),
    GroupNorm (``is_bn=True``) and ReLU (``is_relu=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1, stride=1, groups=1, is_bn=False, BatchNorm=False, is_relu=True, num_groups=32):
        super(ConvBnRelu2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              padding=padding, stride=stride, dilation=dilation,
                              groups=groups, bias=False)
        if BatchNorm:
            self.bn = nn.BatchNorm2d(out_channels, eps=BN_EPS)
        if is_bn:
            # fall back to a single group when out_channels < num_groups
            gn_groups = 1 if out_channels // num_groups == 0 else num_groups
            self.gn = nn.GroupNorm(gn_groups, out_channels, eps=BN_EPS)
        self.is_bn = is_bn
        self.is_BatchNorm = BatchNorm
        # relu is None when disabled so forward() can test for it
        self.relu = None if is_relu is False else nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        if self.is_BatchNorm:
            out = self.bn(out)
        if self.is_bn:
            out = self.gn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class StackEncoder (nn.Module):
    """Two ConvBnRelu2d layers followed by 2x2 max-pooling.

    ``forward`` returns ``(features, pooled)`` — the pre-pool features are
    kept for skip connections.
    """

    def __init__(self, x_channels, y_channels, kernel_size=3, dilation=1, bn=False, BatchNorm=False, num_groups=32):
        super(StackEncoder, self).__init__()
        # "same" padding for the (possibly dilated) kernel
        pad = (dilation * kernel_size - 1) // 2
        layers = [
            ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=pad,
                         dilation=dilation, stride=1, groups=1, is_bn=bn,
                         BatchNorm=BatchNorm, num_groups=num_groups),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=pad,
                         dilation=dilation, stride=1, groups=1, is_bn=bn,
                         BatchNorm=BatchNorm, num_groups=num_groups),
        ]
        self.encode = nn.Sequential(*layers)

    def forward(self, x):
        features = self.encode(x)
        pooled = F.max_pool2d(features, kernel_size=2, stride=2)
        return features, pooled
class StackDecoder (nn.Module):
    """Decoder stage: upsample ``x`` to ``x_big``'s spatial size, concatenate
    with the skip features, then apply three ConvBnRelu2d layers."""

    def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, dilation=1, bn=False, BatchNorm=False, num_groups=32):
        super(StackDecoder, self).__init__()
        # "same" padding for the (possibly dilated) kernel
        pad = (dilation * kernel_size - 1) // 2
        self.decode = nn.Sequential(
            ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size,
                         padding=pad, dilation=dilation, stride=1, groups=1,
                         is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size,
                         padding=pad, dilation=dilation, stride=1, groups=1,
                         is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size,
                         padding=pad, dilation=dilation, stride=1, groups=1,
                         is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
        )

    def forward(self, x_big, x):
        _, _, target_h, target_w = x_big.size()
        # resize to the skip tensor's resolution before concatenation
        upsampled = F.upsample(x, size=(target_h, target_w), mode='bilinear')
        merged = torch.cat([upsampled, x_big], 1)
        return self.decode(merged)
class M_Encoder(nn.Module):
    """Two ConvBnRelu2d layers, optionally followed by 2x2 max-pooling.

    With ``pooling=True`` (default) ``forward`` returns ``(conv, pool)``;
    with ``pooling=False`` it returns the conv features alone.
    """

    def __init__(self, input_channels, output_channels, kernel_size=3, dilation=1, pooling=True, bn=False, BatchNorm=False, num_groups=32):
        super(M_Encoder, self).__init__()
        # "same" padding for the (possibly dilated) kernel
        pad = (dilation * kernel_size - 1) // 2
        self.encode = nn.Sequential(
            ConvBnRelu2d(input_channels, output_channels, kernel_size=kernel_size,
                         padding=pad, dilation=dilation, stride=1, groups=1,
                         is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                         padding=pad, dilation=dilation, stride=1, groups=1,
                         is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
        )
        self.pooling = pooling

    def forward(self, x):
        features = self.encode(x)
        if not self.pooling:
            return features
        return features, F.max_pool2d(features, kernel_size=2, stride=2)
class M_Conv(nn.Module):
    """Single Conv2d + ReLU projection used for the multi-scale input pyramid.

    Fix: the original computed ``padding = (dilation*kernel_size-1)//2`` but
    then hard-coded ``padding=1`` in the Conv2d, which only preserves spatial
    size for kernel_size=3.  The computed padding is now actually used, so any
    odd kernel_size keeps "same" output size.  Behavior for the existing
    kernel_size=3 callers is unchanged (the formula also yields 1 there).

    The extra parameters (``pooling``, ``bn``, ``BatchNorm``, ``num_groups``)
    are accepted for signature compatibility with M_Encoder but unused, as in
    the original.
    """

    def __init__(self, input_channels, output_channels, kernel_size=3, dilation=1, pooling=True, bn=False, BatchNorm=False, num_groups=32):
        super(M_Conv, self).__init__()
        # "same" padding; NOTE(review): formula assumes dilation=1 convs
        # (the Conv2d below does not dilate), matching all current callers
        padding = (dilation * kernel_size - 1) // 2
        self.encode = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size,
                      padding=padding, stride=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.encode(x)
class M_Decoder(nn.Module):
    """Decoder stage: upsample ``x`` to ``x_big``'s size, concatenate, then
    run either a deconv stack (``deconv=True``) or three ConvBnRelu2d layers."""

    def __init__(self, input_channels, output_channels, kernel_size=3, dilation=1, deconv = False, bn=False, BatchNorm=False, num_groups=32):
        super(M_Decoder, self).__init__()
        # "same" padding for the (possibly dilated) kernel
        pad = (dilation * kernel_size - 1) // 2
        if deconv:
            self.deconv = nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride=1, padding=1),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            )
        else:
            # falsy sentinel so forward() can branch on it
            self.deconv = False
            self.decode = nn.Sequential(
                ConvBnRelu2d(input_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            )

    def forward(self, x_big, x):
        _, _, target_h, target_w = x_big.size()
        merged = F.upsample(x, size=(target_h, target_w), mode='bilinear')
        merged = torch.cat([x_big, merged], dim=1)
        if self.deconv:
            return self.deconv(merged)
        return self.decode(merged)
class M_Decoder_my_10(nn.Module):
    """AG-Net decoder stage: three ConvBnRelu2d layers applied to an
    already-merged feature map.

    Unlike M_Decoder, ``forward`` takes a single tensor (the guided-filter
    output) and never uses the optional deconv stack; the deconv branch is
    kept for construction/state-dict compatibility.
    """

    def __init__(self, input_channels, output_channels, kernel_size=3, dilation=1, deconv = False, bn=False, BatchNorm=False, num_groups=32):
        super(M_Decoder_my_10, self).__init__()
        # "same" padding for the (possibly dilated) kernel
        pad = (dilation * kernel_size - 1) // 2
        if deconv:
            self.deconv = nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride=1, padding=1),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            )
        else:
            self.deconv = False
            self.decode = nn.Sequential(
                ConvBnRelu2d(input_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
                ConvBnRelu2d(output_channels, output_channels, kernel_size=kernel_size,
                             padding=pad, dilation=dilation, stride=1, groups=1,
                             is_bn=bn, BatchNorm=BatchNorm, num_groups=num_groups),
            )

    def forward(self, x):
        return self.decode(x)
class GridAttentionBlock(nn.Module):
    """Additive (grid) attention gate.

    Projects features ``x`` (theta) and gating signal ``g`` (phi) with 1x1
    convs, resizes phi(g) to theta(x)'s spatial size, and returns a
    1-channel sigmoid attention map ``sigmoid(psi(relu(theta_x + phi_g)))``.
    ``self.softmax`` is constructed but unused in forward (kept as in the
    original for state compatibility).
    """

    def __init__(self, in_channels):
        super(GridAttentionBlock, self).__init__()
        self.inter_channels = in_channels
        self.in_channels = in_channels
        self.gating_channels = in_channels
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
                               kernel_size=1)
        self.phi = nn.Conv2d(in_channels=self.gating_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0, bias=True)
        self.psi = nn.Conv2d(in_channels=self.inter_channels, out_channels=1,
                             kernel_size=1, stride=1, padding=0, bias=True)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, g):
        assert x.size(0) == g.size(0)
        projected_x = self.theta(x)
        # bring the gating projection to the feature map's resolution
        projected_g = F.upsample(self.phi(g), size=projected_x.size()[2:], mode='bilinear')
        combined = F.relu(projected_x + projected_g, inplace=True)
        return F.sigmoid(self.psi(combined))
class FastGuidedFilter_attention(nn.Module):
    """Attention-weighted fast guided filter.

    Fits a per-window linear model ``y ~ A * x + b`` at low resolution,
    weighting the regression by the attention map ``l_a``, then upsamples the
    coefficients and applies them to the high-resolution guide ``hr_x``.
    All internal math runs in double precision; the result is cast to float.
    """

    def __init__(self, r, eps=1e-8):
        super(FastGuidedFilter_attention, self).__init__()
        self.r = r                      # box-filter radius
        self.eps = eps                  # regularizer for the variance term
        self.boxfilter = BoxFilter(r)
        self.epss = 1e-12               # keeps the attention strictly positive

    def forward(self, lr_x, lr_y, hr_x, l_a):
        """Filter ``lr_y`` guided by ``lr_x``/``hr_x`` with attention ``l_a``.

        lr_x: low-resolution guide; lr_y: low-resolution signal to transfer;
        hr_x: high-resolution guide; l_a: attention weights (same size as lr_x).
        Returns a float tensor at hr_x's resolution.
        """
        n_lrx, c_lrx, h_lrx, w_lrx = lr_x.size()
        n_lry, c_lry, h_lry, w_lry = lr_y.size()
        n_hrx, c_hrx, h_hrx, w_hrx = hr_x.size()
        # double precision for numerically stable regression
        lr_x = lr_x.double()
        lr_y = lr_y.double()
        hr_x = hr_x.double()
        l_a = l_a.double()
        # batch sizes must agree; guide channels must match (or broadcast from 1);
        # low-res tensors must share spatial size and be larger than the window
        assert n_lrx == n_lry and n_lry == n_hrx
        assert c_lrx == c_hrx and (c_lrx == 1 or c_lrx == c_lry)
        assert h_lrx == h_lry and w_lrx == w_lry
        assert h_lrx > 2*self.r+1 and w_lrx > 2*self.r+1
        ## N: per-pixel window population (handles image borders)
        N = self.boxfilter(Variable(lr_x.data.new().resize_((1, 1, h_lrx, w_lrx)).fill_(1.0)))
        # l_a = torch.abs(l_a)
        l_a = torch.abs(l_a) + self.epss
        # l_t: attention normalized to sum to 1 over the whole map
        t_all = torch.sum(l_a)
        l_t = l_a / t_all
        ## mean_attention
        mean_a = self.boxfilter(l_a) / N
        ## mean of a^2 * x * y
        mean_a2xy = self.boxfilter(l_a * l_a * lr_x * lr_y) / N
        ## mean of t * a * x
        mean_tax = self.boxfilter(l_t * l_a * lr_x) / N
        ## mean of a * y
        mean_ay = self.boxfilter(l_a * lr_y) / N
        ## mean of a^2 * x^2
        mean_a2x2 = self.boxfilter(l_a * l_a * lr_x * lr_x) / N
        ## mean of a * x
        mean_ax = self.boxfilter(l_a * lr_x) / N
        ## A: weighted-covariance / weighted-variance slope, regularized by eps
        temp = torch.abs(mean_a2x2 - N * mean_tax * mean_ax)
        A = (mean_a2xy - N * mean_tax * mean_ay) / (temp + self.eps)
        ## b: intercept of the local linear model
        b = (mean_ay - A * mean_ax) / (mean_a)
        # --------------------------------
        # Mean: smooth the coefficients over each window
        # --------------------------------
        A = self.boxfilter(A) / N
        b = self.boxfilter(b) / N
        ## mean_A; mean_b: coefficients upsampled to the high-res guide
        mean_A = F.upsample(A, (h_hrx, w_hrx), mode='bilinear')
        mean_b = F.upsample(b, (h_hrx, w_hrx), mode='bilinear')
        return (mean_A*hr_x+mean_b).float()
def diff_x(input, r):
    """Windowed difference along dim 2 of a height-cumsum tensor.

    Given ``input = x.cumsum(dim=2)``, returns the sum of ``x`` over a
    (2r+1)-tall window at every row, clamped at the image borders.
    Output has the same shape as ``input``.
    """
    assert input.dim() == 4
    top = input[:, :, r:2 * r + 1]
    body = input[:, :, 2 * r + 1:] - input[:, :, :-2 * r - 1]
    bottom = input[:, :, -1:] - input[:, :, -2 * r - 1:-r - 1]
    return torch.cat([top, body, bottom], dim=2)
def diff_y(input, r):
    """Windowed difference along dim 3 of a width-cumsum tensor.

    Given ``input = x.cumsum(dim=3)``, returns the sum of ``x`` over a
    (2r+1)-wide window at every column, clamped at the image borders.
    Output has the same shape as ``input``.
    """
    assert input.dim() == 4
    left = input[:, :, :, r:2 * r + 1]
    body = input[:, :, :, 2 * r + 1:] - input[:, :, :, :-2 * r - 1]
    right = input[:, :, :, -1:] - input[:, :, :, -2 * r - 1:-r - 1]
    return torch.cat([left, body, right], dim=3)
class BoxFilter(nn.Module):
    """Box filter with radius ``r`` via 2-D cumulative sums.

    Each output pixel is the sum of the input over a (2r+1)x(2r+1) window
    (clamped at borders), computed in O(1) per pixel using running sums and
    the diff_x/diff_y helpers.
    """

    def __init__(self, r):
        super(BoxFilter, self).__init__()
        self.r = r

    def forward(self, x):
        assert x.dim() == 4
        row_sums = x.cumsum(dim=2)
        row_filtered = diff_x(row_sums, self.r)
        return diff_y(row_filtered.cumsum(dim=3), self.r)
if __name__ == '__main__':
    # Ad-hoc smoke test: build a segmentation model and inspect its encoder.
    # NOTE(review): `create_model` and `smp` are presumably imported from
    # segmentation_models_pytorch earlier in the file — confirm; `stat` and
    # the tensor `a` below are created but not used here.
    from torchstat import stat
    # model = H_Net(3,2,bn=True, BatchNorm=False)
    model = create_model(arch='DeepLabV3Plus', encoder_name="efficientnet-b0", encoder_weights= "imagenet",
                         in_channels = 3, classes = 2)
    eff = smp.encoders.get_encoder(name='efficientnet-b0', in_channels=3, depth=4, weights=None)
    a = torch.rand((2, 3, 512, 512))
    print(model.encoder._blocks)
| 46.961418
| 199
| 0.612073
| 15,640
| 104,677
| 3.904859
| 0.021356
| 0.059929
| 0.039625
| 0.071326
| 0.939776
| 0.931802
| 0.92404
| 0.918277
| 0.913528
| 0.907503
| 0
| 0.088067
| 0.25466
| 104,677
| 2,229
| 200
| 46.961418
| 0.694706
| 0.086781
| 0
| 0.841418
| 0
| 0
| 0.00873
| 0
| 0
| 0
| 0
| 0
| 0.004975
| 1
| 0.042289
| false
| 0
| 0.005597
| 0
| 0.090796
| 0.000622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c20e96a238407a3228f47417673dd8aa9b253900
| 545
|
py
|
Python
|
src/adapt/project-id.py
|
CaioSR/deps-ahoy
|
730a1ed16f5d48f654e5a85c2366f3e56aebc9d9
|
[
"MIT"
] | null | null | null |
src/adapt/project-id.py
|
CaioSR/deps-ahoy
|
730a1ed16f5d48f654e5a85c2366f3e56aebc9d9
|
[
"MIT"
] | null | null | null |
src/adapt/project-id.py
|
CaioSR/deps-ahoy
|
730a1ed16f5d48f654e5a85c2366f3e56aebc9d9
|
[
"MIT"
] | null | null | null |
import csv


def append_constant_column(src_path, dst_path, value):
    """Copy the CSV at ``src_path`` to ``dst_path``, appending ``value`` as an
    extra trailing field on every row.

    NOTE: csv.writer stringifies non-string fields, so passing a list such as
    ``[1]`` produces the literal field ``[1]`` — kept for compatibility with
    the original script's output.
    """
    with open(src_path, 'r') as rF, open(dst_path, 'w', newline='') as wF:
        reader = csv.reader(rF)
        # hoisted out of the loop: the original rebuilt the writer per row;
        # the redundant explicit close() calls are dropped (with handles it)
        writer = csv.writer(wF)
        for node in reader:
            node.append(value)
            writer.writerow(node)


if __name__ == '__main__':
    # Tag spring nodes with group [1] and shiro nodes with group [2].
    append_constant_column('spring/aNodes.csv', 'spring/aNodes_new.csv', [1])
    append_constant_column('shiro/aNodes.csv', 'shiro/aNodes_new.csv', [2])
| 24.772727
| 96
| 0.581651
| 80
| 545
| 3.9375
| 0.3
| 0.050794
| 0.101587
| 0.07619
| 0.844444
| 0.844444
| 0.730159
| 0.730159
| 0.730159
| 0.730159
| 0
| 0.004866
| 0.245872
| 545
| 21
| 97
| 25.952381
| 0.761557
| 0
| 0
| 0.705882
| 0
| 0
| 0.143119
| 0.038532
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df9fb662a3a7ad6e1fac83b218e03ec76f821c84
| 13,108
|
py
|
Python
|
bvai.py
|
Shahin3010/Shahinvai
|
6128c4e9883d5da35f1b43ace6e68d972034b6cd
|
[
"MIT"
] | null | null | null |
bvai.py
|
Shahin3010/Shahinvai
|
6128c4e9883d5da35f1b43ace6e68d972034b6cd
|
[
"MIT"
] | null | null | null |
bvai.py
|
Shahin3010/Shahinvai
|
6128c4e9883d5da35f1b43ace6e68d972034b6cd
|
[
"MIT"
] | null | null | null |
# bvai v1.0
# Contact Me On Facebook For Using This Tool
# Coded By Shahin Islam
# If You Wanna Take Credits For This Code, Please Look Yourself Again...
# Reserved2020
# Powered by Shahin islam
import base64
exec(base64.b32decode('EMQGE5TBNEQHMMJOGAFCGICDN5XHIYLDOQQE2ZJAJ5XCARTBMNSWE33PNMQEM33SEBKXG2LOM4QFI2DJOMQFI33PNQFCGICDN5SGKZBAIJ4SAQTPORXWYTLFNBSWI2IKEMQESZRALFXXKICXMFXG4YJAKRQWWZJAINZGKZDJORZSARTPOIQFI2DJOMQEG33EMUWCAUDMMVQXGZJAJRXW62ZALFXXK4TTMVWGMICBM5QWS3ROFYXAUIZAKJSXGZLSOZSWIMRQGIYAUIZAKBXXOZLSMVSCAYTZEBKGKYLNEBLFMSKSKVJQU2LNOBXXE5BANVQXE43IMFWAUZLYMVRSQ3LBOJZWQYLMFZWG6YLEOMUCOY24PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBTLR4DAMC4PAYDAXDYGAYEAXDYGAYFY6BQGBOHQMBQONBFY6BQGBOHQMBQLR4DAMDELR4DAMC4PAYDAZC4PAYDCXDYGAYGYXDYGAYFY6BQGBNFY6BQGBOHQMBQMROHQMBQLR4DAMDELR4DAMK4PAYDA3C4PAYDCXDYGAYFUXDYGAYVY6BQGBSFY6BQGBOHQMBQMROHQMBRLR4DAMDMLR4DAMS4PAYDAWS4PAYDEXDYGAYGKXDYGAYVY6BQGBVFY6BQGNOHQMBQMVOHQMBSLR4DAMDKLR4DANC4PAYDAZC4PAYDEXDYGAYFY6BYGNOHQMBRLR4DAMC4PA4DGXDYGAYVY6BQGBSFY6BQGFOHQMBQLR4DANCVMROHQMBRLR4DAMCTFBOHQMBTLR4DAMC4PAYDAXDYGAYGSXDYMZTFY6DGMZOHQZTGLR4GMZSOOQYFY6BRMNOHQMBQLR4DAMBXHA4UGMRVHE4EGOKCGIZDKQRXG44UCRCFG43UCOCBGEZUMNRUGQ4ECNJQGMYDOQZRGNCEIMJYGA4TEMBRGM3UIOJTGY4DCM2CGNATENZYGZBTGMJUINBDCMKDGJBTKRJVIRBUENCGIVDEERRXGQZDOMZVIQ4DMNZSG4ZTMRRSGZDEINSGIE2TMRSEIVBEGNZYIY3UMRSGIYZEMM2DG5CEMRSGINDDARSGIZDDMOBXG5DEMRBRGA2UMN2GIZCEMRCDII2UMN2FIZDDAOBZIYYUMRJZIQ3TQRSFIJBUENKGGNCTORSGINDECRRVGNDDGRSBIVAUMQSGG5CEMNBWIYZUMRSGIZATCRRRIZCUIQKGGNDDORSGII3DQNKGIJDEMRKGGJBUMMSGIZDEMNCGHFCDGQZXIY4TQRRYIZDDGRKCIVDDKRRXIYYDSOKGG5DEMRCGINCTSNRXIY4EIOCGINDEEQZXIZBEMNKGIY3UKQ2DGJDEERSGIVDECRJVIEZTORCGHFBUMQ2GGBDEMMJRIJDEGRJVIY3TMRSGIYZUGRJWHFDDANZXIM3UMRRSINDDCRSGHE4TOQ2GGJDEIRRTIUZUMRSDIY3TKRRYGAZDON2FIZBEMNSGHFBUEQSGG5DEMMCGIZDDQRRSIYZUCRRTIZDEGRRQGAZTCQ2GGQ3UGN2FIZBEMQ2FIRCUMOKGGNDDCRCGIY3TMN2DIJDDKQSGIVDECRSGGNCUKMKBGA4EMMKGIZDEMMKEGEZUKQSGIZBUKRBWGMYEGRRTIY2EMQJYG44UMRSGIY3DKRRVIY3UMRSCIZCEKMZWIY2UMN2FIZBEMNSEG4ZEMQSGIZBUMOJXIY3UKRRZIY4UGNZTIZDEMMBYIU3TKMSGIM4DOQ2GIZDEMRKGINBUEMKGIJDDORSGGBBTGNCGHFDDORSGHFDECQRXIZDEMQKFIRDDGRCCII3TGRSGIVDEMQSGIZCEMMBSGMZUCQSFG5DEMOCEGNCTOMKGIZDDENJYIIZDIRCDGIYDIQRRINCTEN2BIZBTAQZRGQYEMMZWIQYDOOKEIY2DSQRWIM3DSMKGG5CUIRJVGU3UINSFI
IYTAOBRGZDDONBUGQ4UGMCCGVCDCMJWHE4EMQZXHA2TQNRRII4EKMCGIVBEIOJWIFCDMRJZIMZTKMBUGNATQQ2DIU2EGQZYINCDERRYII4DQNBZIQ4UMN2FGFCUKRKEIEZTSNBVINATMQRXGVBDGMZXGRBDMQZQIUZECNKFHE4EINJWGMZTORRXIFCTQM2BGIZTSMSCGQ3DCRCCGFBUGMRUGZAUIRCCGMZDMQZYIVBEIQRYIQ2UGMKCIQ4DERSBIZCUGNKBIY2DQNJVGM4DMOBYGE3TEQSCIVCECNJXIJCECQSCGIZDGMZYGQYTAOJXIJBTGNBUINBUEMCGIRBDARRXGY2DQRRWGU3EINJQGRDEIQZTGU4EINZXGE2DCOKEGYYTQM2CGVCDGRBQIVDECRBUG43UMOJQIVBEMNBUIY2TIMRYGQZTCQZWGE2TARJWGZCUKMBYGE3TCNZUIFCTKNRWIY3TQQJYGE4TQQJZGY4UEOJRGIYUINZUIY2UKN2BIMYEERBTGA3UGRCEINCDANRQINCDCNSDIIZEKMJWIVCTKRKDGM4UGNKCIIZTSMJTII2TSRJUINAUENBUIFBEGMZSGYYTONJSGVCTKNBYIQ2TOMBXGQ3DGOBRGA4DCNCBGQ3EMQ2FINCDOQSDGE2DOOJSGEYUENBRIVBTOMJYGE2UMOJXIUZUEMJUIM4UMNZZIU2DEN2DGJCDGMSFIY2UGQRXIFBDQNBRGU3TOQKGGYYTQRJRINCUMRRZHA4DQNKDG42TGOKBGNCDKRJUGQ2TCRCCIQ4DIQKDIVDDMNZRIJCDINZQIQ3TCNRXGUYUGNZRIQ3EGNBSIY4TIRKDGIYDQRRTIU3DEMRZGU4DKRRXGYYEIQZSIFATEQRQIFBEMQ2BGZBDINCCGQZUINCEHA2UEM2FGU2TQQKDIE2TAOJUIRBTKQ2GIMZTEN2DG4ZUIMZUGYZTQOKFIVCEGNRUHAYEMQZXIUZUCOCCIM4TOMRUIRDECNZQGRDDKRKBIMZEENJUGRBTSQJQGI3TINRVIMYDIOBTIEZEMMBTIFBDSNZTII2DQOCGG44DINJRG44UIQZXGZBDKQJYIYZDSMCDII4TEQZRINCDOOBZGI2EEM2BIJDECQKBGRCDGRCFIQ2DSQKBIUYUKMRVII4EKMZUIM2DQQKFGY2DORRYGUYTKNBVGE4EENBYGQZUCNCGIZBDIQKFGFBUINZZGRBECNZQGU4DONZZG4ZDIOBTIFCUGNSGIM4UKN2CHE4TEMCDIRATANBWHEZEKRKGIIZTCNRWIJDDSNZZHE4UKMCEGIZTSRBVGQ4UGRCGIQZECMSGGE2DSOJWGQZUGMRXG5BDSOJRIVDEKMCDGAZDKQRQIJBUIQSBIVCEEMZRIY3TKRCFIE3TIOJQG42EIRKBHA4UMOBWIUYDKMSFGVBEGRSFG4ZEGRBXIMZUIQKFINCDGMZQG44DMQJVGU4EKOJUGNCTCNZYG43EEMZZG5DECRBVG4YTEOJSIFCUGRJQGU3UKNRVGU4TKNKFGVBEEQRYGI3UMQKDGVCUMRKGIIZUGNBQIRATSOJWGBBDGNKGHE4ECRCDIQZUCMSFINBDCOKEHBAUCRSFGA2DMM2CIM2TKRSGGM4DQRRTGNBTOMJRGBATGMRQIM4TCRRZIUZEKNZRIYZTONJRIM3DSMBUGEZDSRJYIVCDKRRRIU3EMMZQIM3UKMJZG5BUKMBSHFDDAQKEIY3DORBQGUYEMNBTHE4DAN2DHE2UKQJRIU2DAMZTIVCEKMBTGNDECRJQIY2EERJWIU2EGOBRGY4UIQ2GIIYTCMRRIZCDKOKBIMYTMRRRGQ2EKOBXGVCTSNSCGI3UGOBYHBDDSQSBGUZTCQZZGUZTKMJXIVCEIRJZGY3TGN2FIY3TGMJXIY3DCNSGGQZDARSDGBATOOJVIFCTMRBTGEZUGNBWHEYDOMCGGVBTANZTIVCECNJWIU2UCNBZGZAUEMKCHFDEKNBXH
A4TMOBVIY2TSNSEGBCEKNBTGMZTGNJVIYZTKOCGIZCTINCBGUYUMMJRIU4UMMCDHBDDCRCDHEYUCMZZGMYTOQKGIE3TGOBUIU4DGMSGIMZEKMSGGVBECNRWGEYUCOKFGYYUKQSEIY3UGMZTIUYTOMRSIQYEIMJQIMYTKOBXG5BDKMCGIZAUKOJWHBDDARKGGNCDIRCGGJCECRCFGY2EERRUG4YDIRJWIVBUKQRYGBBEIRSFG4YUCNRZIUYUEMBRIU3TSMBXGQ2DKRSFGI3EMOKBGI4UGQKBIM3DCNZWGJCECQSCIRAUCNCFIRCTSNCCIY2EMQZSHE4UCOJUGNATSRRRIYZEMRKDGI4TIQJTGRCDOQ2BGAZTEQJYHFCEMRBVGZATGNBVHAYTEOBTGVDDGQRRHE3TQQ2DHBAUKRRQIMZEEQSEGRBTONJVHBBDGMBUHFBDIRBVIIYTOMKBHA3DSNKCIRDDERBXGQYDCQZRGIYECMZWIM2TOOCGII4DCQKEHBCDMRJTGU4DIQKCIUZDSMJTGJDDONJWGU2TKOJYGI4TINJWGJBDOM2CIFCDQQ2DG43UGOJRGYZEKRCBGI2TKNZYIIYUKRRVIJATMNZYHA4DIMZXGY2TQMBSGFCUIRBWHAYEMOKFIY2DCOJSGVBEKQZTIM3EMNZRGIYDCNJXGRDEMMJRHE2TEOCGIU2TOMJRIFBUIOKFIQ4DQQJUIQ2TOQ2FGQYTEOJQIVCUGRBUGI3EGNKEGNCEGOCEIU3EKMZRHE3DGQKEGQ2DGOBTIY3DQQKGG5CEKQSEGIYEKRJWGU2DSRCFHE2DMQKBINCDCQZXIFBEENCFIM2ECQKDGE2TEMBXGE2EGQJXIU4UEQSCIQ4TERKCGFBTKNZTIJBEGRJTG4ZDQOKDHA4EIMCGIZBDSNRSGRAUCMJVIM4EIOKFG4ZUMNZRGY3DARKEIE3EIMBYHFBTOQJTIQ4EKNKEIUZDMQ2GHEZTOMJSIFATKQ2DGNBEIRBYGE3TSMZUIQZTMQSEII2TCQKBIIZDEMBWII3UEOCBGE3TARCDIY2DCMJZIYYTMOJSGZCDERBTHBATGRKDHE3DKOBYG44DMMBZIY3TIN2DGZBEEQ2GGQ3UKQZZGMZECNBZGU4DOQZSGNCTGRKEIMYDKNCDGI2DERBTHA4TAQRTINBUGM2BII3TEOJRIEYTSOBSGVDDQMZSIE3TIQZZIFBUKNCBIRCTIM2DIMZEKQRYINAUINZYHFCEIN2CGI3UEM2DIEZTSNBSHAYTSRSBINAUGRCDGMZUCRJTG44TKNZUIQ2EKN2EGFCTAMCFIVCDCMRYIJBUMMRVGM2TORRXGM3DGM2BGQ3TAMZWG43TKNRWIRBTKNCFIZCTCRBUGM4TSRJQIU2EIOJYGAYTARJTINBTINKEGI2DGRKBIU4ECNSFHAZEENJTIIZDMNBRGI3UCOJVHE4TQOJSGM4UCRSEG5ATKQSDIVBUIOBSGUYEMQJSG42EEN2CGZBUKOJTHAYDINJSHFBUCOJYGEZEGNKEGRBUKMSGIRCUIRKBIEYUMMRTIYYTCRCEGI4TKRJXGVAUGNBZIYYTQNCGGI3TGQZQG44DKNJYGQ2EGQKGG4ZUKMBXGUZTCNBRIM2DSQRRGE2TENSCIU2DCRKBGBCDKNZVIEZTGMJQGQZTKMSCII2UENZXHE3UINSGGZBTQQKEIEZTQRBRGI4DIRKFGI2TQOCCGQ3DCOCBGA2TARCBII4DIQSBGJBDGRJXIFCEGM2DII2DGM2DHFAUEMSBHFCUINJUGNCDQN2FIQ4TSOCFIIZTINJXGM2TQQKBIU3UIQZYGA2DAQJVIU2EIMCGIUZTONBYGVBUGNZXGUZEINZZII2TSNSBIM2TEQZWGEZDMM2EGEZEINRZIEZTSNZWIJBUKQSGHFATMQJWGNBEGRJZII3TMNJSGYYDGOBZGE3TENBZIRCEIMRZIY4TOQ2DGUYDGMRWGJBECQJQGUZEINJUG
MYECOJSIZBTMNCGGI3DMOBWGFDEGRRWII4ECMZSGFCDSNZUGBBEKM2CGIYDGOJUGRBDSRJZIE2DGQ2FG43TQN2DG43UKRCDIIYUMQZXIJBTGMZYHA3DAOKEGU3TGNSEGUYUKMBSGQZTANJZGRATSQKBGA4EIRKEG5DDSRKFGE2UMOCDGM4DIQRWGJDDENRQIZBUIRJUGA3DIMZXIVBDSMCBG5ATOMBVIY4DQRKCIVBDENRWGYZDAQSDINBDMMCGIQ4UKMRVIFDECNBTGEZTGMCCIM4TGMCFIRCEIN2CGM4UKRRRGE3DSQ2FIFBEKQSEIRCTMMKCHEZDKRJWIU3ECMKEGU2EGRSDIU3TGMRZGNAUINJRGRCTINKGGEYTORBZIQ2TOQ2GGFAUGQZZGZDEEMCDIMZUCMRQII3UEQ2BIZBDEMRTGFBUEMBYIEZUMNBQIVBEMMSEIVDEINBUGBDDCQZQGIYEGQRTGFCUIRJTGUYDMN2EIUYTQNBWGM2EIRJSGFCUEMCCIRBDANZWG5ATGOBQG5CTERJQIJATENJSHBCTEOBUHAZUMRRSGA3EMMJVGEZTOMJSGUYEEN2FIRATORBZGYYDGMSBGBBDQQZSIEZTANJSGQ2TGNCBGA3TQRCCGYYTEQRZHBBEGMSEHBAUCQRZGFDDORRSIEZEIQKFIQ4UGQRRGVATSQRYIMZDCMCCIU4TCMRUIM4TONBSG42DEMZVIRBTINRSGQ3UCNKCGQZDONRQGMYDKQJRIVATCMRWGRATIRKGIMZDSNBWGFBUGN2DINBUMRKEGQYTGQJQGAZTINCFGFDDMRBXHBBEMQRSIRCDKRJSGI2TEMSBIMYUEMRXIU3UENJYGAZUMMRUGNAUCMJXGUZDCQRUGBATANCBGFCDKOKCGM4TANSCGFCUKRBVIMYDQMZSGY4DKRJTGFCEEQJQGZCTCRSEGI4TIRBYGE3DMOBRHA4UENBYGVCEEOBUGBCDQNSBIY3TINCDIIZTSNSFIQ4EGRJWGQYUINBWIZCTGN2CGRBDON2CIMYTMRBSIZBEIRJTHA2TCRRQII3TMRJWIRBEINBWIRCEMQRRIE2UCMSFG5BEEMJWIFCDKNSBIU3DCQJYGJATQMSFGVDDKNRVGEZECRSDHE4TOMKFINAUCQJUGA3EMN2DIY3EIMKFG43TEMSFGY4DSMZZIY3EMMBSHBCUGRCEGAZDGQKFIJDDGOJRGE4TMNRXGAYDQQKDGZDDAQRSGU3TSQZVGU4TQQRQHBBTGNSEGU4EGMCEIIZTSNJQGUZTIRJXHFATOQKGGY3EKMBUINBTKOJVGY3EGOCEIRATCQZTIM4DCQ2FG43TQRRXIM2TAMJWGBATARKGGM2DKNSCGQYDKMSDGIYTAMJVIIYECRCEIRBEMNJWGJDDAMJUG5AUKNBXGA3DCMKBGYYDORCCGY3TCMBRIIYDIRRUII2UMNBTGI2TKMRVGFAUCN2DIE2DAQZZIU4EKQKBIM2EERKGIJBTIMRXIJBUGNRXIE4UINBYGAYDORRSGU2UCMKGHBBDGRBXGNDDKMZZIRAUMNZXINDDKN2DHAYDGQRQGFCDKQRWGVCEGOKDIRATSQSFGAZUIRRWHBCDGOKFIVBEERBRGQYTOMBZG43DAOBXGY2TMOCEIEYDENZYGM2TGNBWHEYTQOBSGUZDCNBWG5CTONZWIJDDSMKFIY4TQQKEGYZDQMRXHE3UIMBQGZDECMCCIM4EMNJTGIYTSNKCIMZDQRSCIRDDGMBTHA2UIRRRGMYUGRKBIQ2UMNCFIRCEEMBRGEZEEMJQGRCTGNJVIVDEEQRZG4YDMMBTIUZUCRKCGMYDMNJSIM4TANSCGE3TSMSBGMYTANZRGQYTOOKDIRBDQMBZHAZUMNCBGVCDANSBIJCTCNSDGE3ECRRUGM3TONJUINBDANJSHE3TMRBTGEZTSOBXGM4ECMRZGEZUERBVIFCDIQZRGFCDGN2DIMZTAQSGGI4DCMZRI
IYDGNKFGI3DAM2DGNCDIMCCGMYDQNZRIY4TMQKFGEYDCMSEGQYTMQJTIVDDAMBRGNBDCMBYGZATEQJWGFBDEMJZIVCDCNBZII3UERJXGYYEMRBSGRCTKQKDIRBEIQRWGRBUIOCGGAZTOMJTGA4EEQZTG4ZUEMBRIVCEKMRWIM3TKNRXHFBTMM2CGY4UKQJYIM2UCNBXGM4TSM2CGRCTGNZYIM4DOMJQGA4DEOKFGQ3UCRKDGNCDCRRUGRATEMZYGUYUKNRVHA3EKRRTGNBTSOKFIRBDCOCEHEYTKOBYIQZDMQRXGA2DQNZRHEYUKQJZIQ4DOM2BHFATKNJRIY4DKMCFGIYDANRXHFCUMQJYGAYTQRJXG44DIM2EIY3EMQZUIE2EMMCBII2DQRJSGU2DKRRTINBTANJUGA3DEM2GGI4DAQZXGEZUCQKGGYZTSQRWIRAUENKFG43TQQZXIJBDGQSEGAYDMNKGG44DQMBTGA3ECNZUIFATQOJXGNAUGQZTIE4EGQRYGJDECQSBGRDDONZVGQ2ECQSCGMZEMRKCHE4DMMRVIYZTKM2DIVDEEQKGGQYDKMSCIFCDARJWHA3TCN2GIVCTEMJVINCEKNZXGA2DERJUGIYDKRRZGIZUGRRVIFCECQSBGBBEKMCEGAYDSOCFGVDDMQZZIVATQQJYG44EENSEIQ2DGOBSIQ2DOMRRIE3TGQKGIIZDINKBIJBTIMBSIZATMNRXGYYTKNZWGM4EENCCG5CUKMBUGQ4TKRSEGAYDKNJTGNBECRKFGI2DKQ2CGI3UKMCFGI3TGNJSGA2UKM2FIYZDENBVG5CEEMCFG44DSNRVHAZDANBRIY4EEMKFIQYEKOCCGNATANJUGQ2TARJZGJCEIQJRGJATCN2FGU3TARBZGQ4TQM2FG5DEINBQIVBEMMZVGU2UKNZSGAYDSNBYIVDDGQZZHBATCQ2FG4YECQKCGAYUCQJWIU3UERRRIVCUCQRWGI2TGNBSGY4EKOBTG44TMMRUIFBTKNZUG4ZDANBVIQ3DQQRZIQYTMRKEII2EGQZQIQ4TAM2DGFCUEM2CGIZTGRRZGEZEEQJWIE2EMOJVGU4TQQZWGE3UINJZIMYDQQZSGNBEERJYHA4DMMRQIZBDKRJVIMYEKN2EGNBTMRBYGIYEENBWIUZEKRSFGRAUGOBQIFBUEMKGGZDDGRRXGVDDENBYHAZUERKCIYYTOOCCGBCTGRBTIFATCRBWGVAUGQJZGRCDSRRXGY4UENZZGAZTIM2GHAYUEMZSGA4UGMSDIMYDENZRGQYTANCGIJDECOBSG4ZEKRSDHBATKNRYIQYECMSFGVCEMNZQHFAUMMCFG43UENCEGZDEENZYGA3DSOBRHA4TQOBWG44DKOBUHFATCMJWGBBEKQZTGBBEKNCGGM4DKNJVIVCUENKBGE4TANRSGY4TSOBSIRDDGMZYGYZECMKFGYYDGMSBGZCDENZXGM3DKQZVGFAUKMZVHE4TCMZRGAYTMRRXGAZUMRRQGY3UMMZUHEZTINBQGMZDGNRSGNATGNRZGE4UIRCEHBCDIQSGIQ4EGN2EG42TAOBZHA4TQRJUHA2UIQJYGEZTIQZWG5BTENZUGU2DANZVIFBDGQKFGVATIRBRGRBEIOBTG43EMRJVINATEOCDGYYDKQRQGFBTANRTGMZDCRRRG5AUKM2BGNBECNZRGAZDCQRZHE2DAOBTII4TIRRWG4YUCQJVGEZTSMBTGAYDCQZXIE2EIQRZGUYEKRJSGZCEKMKGGEZUIRCFIZBDERBUIIZEKOBQGM3UCQJTGVCTIQKBIUYUGOJTII2DGMBWHEZDEOBTHAZDGMCCGE2DQMRYGIYDMMRTHBCTKNBQGU3EMMZWGI4DKQZWIQZDEMBSG4ZTCNBUG43DEQSCIVATAOJRG44DAQZVII2DANCDHBDDCOKFIEYTORCBG4YTSMBZIMYTQNCBIU2TSRRVG4YTIMZYG5CDQNZVIQ4EIM2CI
MYDOMCGHBDDONRRGU2DIMRUGE3TINCDHEYDAQJQIJDDSOKEGJDDSNBWIM3TEOJZGI3TMMBUGAZEEMJSIRCEGNRSINCTKNSDIE3TONJQIUYDIRJTGEZTCQ2EIE4TMOCGIFATKOBZGY3TIOKCIM3TARBQIQ4TKRJSGE3DGM2GGVCDIRCDIM2DIOJUIVCDKRBYGRCUMMBWIUZEGQZSIZBTQM2FGYZDMOJRIVBTKQRWIU3DSMKEHA2UMNSEHAZDKNZYGM3UKM2FGQ3TKRSBIQ3DIMCCIU3EKMBSIRBEIOBQINATAMRYGQ3DSOKDGQ3TMRRTHE2UMRBSHEZUKNSGGAYTCMBTGRBDCOBZINCUIQRRGQ3TAQKFIY4DSMRSIUYTMMRSGNBDQNBUIUYTINZSIJCUMRBUG4ZUMNCBIMYEGNRUIEYDONRSGJBTCNBWIU2DORBUG44DMMKCIVBTCRBQGQ4TENKEGRDEKQZYG44DARCGII2TQMBYGUZTIOCFGVBDGOBYGEZECOCEGBCTKM2CIIZUEOBRGQYTANCGGZCUMNZZIY4EIOCCGFBDMMKFIU2DGOKFINBDERBSG42UINJZHBDDQQJRIQ2TSOCCHBBTEQKBGM2UGMCCGI3UENJVII4DGNZYIUYDOM2GHFCDQRKBIMZDMRJTIM4EIQSBGNCUIQJQHEYDKMJUGI4UENRQIZDDGNZTIVBDCQ2FGQYDQQKGGIYECNZYGJCTQRBYGE2UCRBYGMYTCOBXGMYUGMJYGAYDOOBQII2TINBYHBBUKMZQGVBEGNRRHBDDGQ2GHA4TANSFGRBEMMBXIJBTONBTGYYDOOJWGQYEIMCBGBBDSQRQIEYUIQJTGBBDSMSDIJBUGRCDHBBDKMRUGUZDKOBVIIYECQZWIU3UMRBSHE2DEMBQGVBEMNBRHAYTGNRUHBBEEOJVGVCEEMBXGJCTIOBYIIYEKRRUII2DERSBG5BUCMKGIE3UEMJQIQ4UCMBXIMYUEOKCIQYEEQRSGI4TOQJXHA4TEMRQGQ3UCMBUGFCUCNRUIJCDCQZWGNATQRBSGFAUGRSGGMZUERJUGYYDIOBQGA4DSMKGGEZUIRBUGIYDQRRUHAZUKRRVGZBEKRRRGFCTAOKDG4ZTMNBVGBBTAN2EHA4DCQZXGM4TQOJQGBATKOBYGEZTMRSCIZAUGQZQIQZTQRCEG4ZDMMJSHA3TCN2BGI2EKNRZIJCEMQKDGY3TSRJQGQ4DAMCGGQYTSM2CHA3EGNRQGMYDSMZUGMZDORRUG42TONSDGQ2ECNRYIEYEKNKGGA2EIRBQIQ2TOOJZGQ3DMRRZGQYDGRJWIY2EKRSEHEZTCMRUIMZEGRSEIFCUKQ2BGAZDKMJTGRBDIOCDIYYTOOBUIRDEKNBYGA4DKMSEII2DMNSCIJCTINBQGZBTEQZYGNBTQOJSGJBEKNCGG4YDERJWGQZDEMCDGQ3DMRBVGJDECNBRIJATIMRUHA4EIOKDIVBECMBQHFCTONSDII4UGOCFIM4DARSCIJCUIMRTIRCUCMZUGBBDCMZQGA2EGQZWGBDDOOCDGQ4DQRRXGU4UENRXGVCTENJSHBCTGNSFIE4TMOKEIEZEMOJUGBCEEOJZIFDDENRVGBBDIOKDGJBTKMBZGA2TOQ2FGY2TSOJWHA2TEMCEGNBEGMKCGNBDCRCFHA3UGMKCIJDDGMJYIZBDQOJXGE3TKOCGINDDMMCGGIYTSQJYGFCUMMCEGUZTGQJTGBCEGNRTGRCEGMCBHAYUEMJUGMYTARJZHA3EKOBTIY4DAOJZGI4DCOBTGA4TANSEHE3DQNCGGA4UIQKBIFBDCM2DIQYDONJZG42DMRKGHBCTCNBUGYZTORCGIEZEIQJVG43TGQZYIIZDMMBZGRBTORRXIVBDSQJVII4TIMBRHAZTEMZZGNCDONBXGJATONBSHFCDEQ2FIEYTGMJSIMYUGNBTGA3TKOCBIQZTQNCEHA2TIOBUGEZEMQRWGY2UCOKEI
FCTCN2FGEZECOBXHAZTMMJZGEZTMOBRIYZTAM2BINBTIRJYGIZTINSEGFCTSMRTGI2DAMZRIU2DKOJUGRBEGN2EGQYTENRRIVBDCQKCHAZTKMSEGM4ECOCCGUZDERCDGM3EEN2CG5BDGQRSIMZUEMRQGZBEKQKDIZCDKNJYG5ATIQZQIJCDQNZVIFCDKMRTIJCEENZVHEYTKMZQIJBEIMBVGYYDEMCCGM2EGQRUGVBDGRRQIJBDIMJUG4YUGRCFIUZTOQRZII2EKQRYGM3TMNRQIM3TMMKEIVBDAQKDIM3UKQ2CIUYDCRJTIU2DQM2FIJBDGMCBIM4TAMBTIUYDSQRYIUZDQNBZIEYTQNRXIUYUCNJZIZDEEQJUHBCDGMKDIE2TMRCCIY2EMQRXIYZDCRJRIEYUKNRUGRDDOQSGIZBUKOJUIY3UMRSBGVDDARSGGRDDCMSBFBOHQMBVLR4DAMC4PAYDAXDYGAYHIXDYGA3VY6BQGBOHQMBQLR4DAMDNMFZHG2DBNR2FY6BQGROHQMBQLR4DAMC4PAYDA6TMNFRHIXDYGA3FY6BQGBOHQMBQLR4DAMDCMFZWKNRUOROG4XDYGAYFY6BQGBOHQMBQMRSWG33NOBZGK43TOROHIXDYGAYFY6BQGBOHQMBQMIYTMZDFMNXWIZJILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQONOHIXDYGAYFY6BQGBOHQMBQKN2W2YLSOIQESRDULR4DAOC4PAYDAXDYGAYFY6BQGA6G233EOVWGKPS4PAYDOXDYGAYFY6BQGBOHQMBQONOHQMBSLR4DAMC4PAYDAXDYGAYCIXDYGAYSOKJJ'))
| 1,456.444444
| 12,898
| 0.995575
| 38
| 13,108
| 343.421053
| 0.842105
| 0.001226
| 0.001992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065274
| 0.003052
| 13,108
| 8
| 12,899
| 1,638.5
| 0.933349
| 0.013885
| 0
| 0
| 0
| 0
| 0.996362
| 0.996362
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
dfcd27f0e3f17eee342ca1d024f5c9c1933d8cf6
| 4,251
|
py
|
Python
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 3
|
2021-11-27T18:03:31.000Z
|
2022-02-06T21:47:59.000Z
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 13
|
2021-11-03T18:15:06.000Z
|
2022-03-27T22:28:38.000Z
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 8
|
2021-11-16T04:07:41.000Z
|
2022-03-14T14:51:34.000Z
|
from ..utils import Endpoint
from fastapi import HTTPException
# Shared API client used by every test in this module (project-local helper).
endpoint = Endpoint()
def test_post_consent_type():
    """POST /consent/type accepts valid payloads and rejects invalid values.

    Upserts the id "test-name" twice with valid data (opposite boolean
    variants), then posts a payload whose ``default_value`` and
    ``auto_revoke`` are invalid and checks the validation errors point at
    exactly those two fields.
    """
    try:
        data = {
            "name": "test-name",
            "description": "test-description",
            "revokable": True,
            "default_value": "grant",
            "enabled": True,
            "tags": ["tag1", "tag2", "tag3"],
            "required": True,
            "auto_revoke": "15m"
        }
        result = endpoint.post("/consent/type", data)
        result = result.json()
        assert not result["errors"]
        assert result["saved"] == 1
        assert result["ids"] == ["test-name"]
        # Same id again with flipped booleans -- still a valid upsert.
        data = {
            "name": "test-name",
            "description": "test-description",
            "revokable": False,
            "default_value": "deny",
            "enabled": False,
            "tags": ["tag1", "tag2", "tag3"],
            "required": False,
            "auto_revoke": "15m"
        }
        result = endpoint.post("/consent/type", data)
        result = result.json()
        assert not result["errors"]
        assert result["saved"] == 1
        assert result["ids"] == ["test-name"]
        # Invalid values must be rejected with per-field error locations.
        data = {
            "name": "test-name",
            "description": "test-description",
            "revokable": False,
            "default_value": "incorrect_data",
            "enabled": False,
            "tags": ["tag1", "tag2", "tag3"],
            "required": False,
            "auto_revoke": "incorrect_data"
        }
        result = endpoint.post("/consent/type", data)
        result = result.json()
        assert "detail" in result
        assert result["detail"][0]["loc"][1] == "default_value"
        assert result["detail"][1]["loc"][1] == "auto_revoke"
    finally:
        # Cleanup previously ran only on success; try/finally makes it run
        # even when an assertion fails, matching test_delete_consent_type_id.
        endpoint.delete("/consent/type/test-name")
def test_get_consent_type_id():
    """GET /consent/type/<id> returns the consent type created via POST."""
    try:
        data = {
            "name": "test-name",
            "description": "test-description",
            "revokable": True,
            "default_value": "grant",
            "enabled": True,
            "tags": ["tag1", "tag2", "tag3"],
            "required": True,
            "auto_revoke": "15m"
        }
        result = endpoint.post("/consent/type", data)
        result = result.json()
        assert not result["errors"]
        assert result["saved"] == 1
        assert result["ids"] == ["test-name"]
        result = endpoint.get("/consent/type/test-name")
        result = result.json()
        assert "id" in result and result["id"] == "test-name"
    finally:
        # Cleanup previously ran only on success; now runs even when an
        # assertion fails, so a broken run does not leak the fixture.
        endpoint.delete("/consent/type/test-name")
def test_delete_consent_type_id():
    """DELETE /consent/type/<id>: first delete reports 1, repeat reports 0."""
    payload = {
        "name": "test-name",
        "description": "test-description",
        "revokable": True,
        "default_value": "grant",
        "enabled": True,
        "tags": ["tag1", "tag2", "tag3"],
        "required": True,
        "auto_revoke": "15m"
    }
    try:
        created = endpoint.post("/consent/type", payload).json()
        assert not created["errors"]
        assert created["saved"] == 1
        assert created["ids"] == ["test-name"]
        # First delete removes the record, second finds nothing to delete.
        assert endpoint.delete("/consent/type/test-name").json() == {"deleted": 1}
        assert endpoint.delete("/consent/type/test-name").json() == {"deleted": 0}
    finally:
        endpoint.delete("/consent/type/test-name")
def test_get_consents_type():
    """Smoke test: GET /consents/type must not raise."""
    # Dropped the unused 'result' local. NOTE(review): no assertion is made
    # on the response -- presumably Endpoint.get raises on failure; consider
    # asserting on the payload.
    endpoint.get("/consents/type")
def test_get_consents_type_enabled():
    """GET /consents/type/enabled lists only enabled consent types."""
    payload = {
        "name": "test-name",
        "description": "test-description",
        "revokable": True,
        "default_value": "grant",
        "enabled": True,
        "tags": ["tag1", "tag2", "tag3"],
        "required": True,
        "auto_revoke": "15m"
    }
    try:
        created = endpoint.post("/consent/type", payload).json()
        assert not created["errors"]
        assert created["saved"] == 1
        assert created["ids"] == ["test-name"]
        listed = endpoint.get("/consents/type/enabled").json()
        # Every returned consent must carry enabled == True (and the list
        # must be non-empty, since we just created an enabled one).
        flags = {entry["enabled"] for entry in listed["result"]}
        assert flags == {True}
    finally:
        endpoint.delete("/consent/type/test-name")
def test_put_consents_type_refresh():
    """Smoke test: PUT /consents/type/refresh must not raise."""
    # Dropped the unused 'result' local; the call itself is the test.
    endpoint.put("/consents/type/refresh")
def test_get_consents_type_by_tag():
    """Smoke test: GET /consents/type/by_tag must not raise."""
    # Dropped the unused 'result' local; the call itself is the test.
    endpoint.get("/consents/type/by_tag")
| 25.153846
| 77
| 0.549988
| 441
| 4,251
| 5.201814
| 0.126984
| 0.06626
| 0.069747
| 0.095902
| 0.82476
| 0.765475
| 0.765475
| 0.765475
| 0.730166
| 0.669137
| 0
| 0.012724
| 0.278993
| 4,251
| 168
| 78
| 25.303571
| 0.735726
| 0
| 0
| 0.747967
| 0
| 0
| 0.285714
| 0.053277
| 0
| 0
| 0
| 0
| 0.178862
| 1
| 0.056911
| false
| 0
| 0.01626
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a03286400fc7c47288b0c4f0a89d1cf79485789b
| 29,715
|
py
|
Python
|
flasky2/venv/Lib/site-packages/weka/flow/sink.py
|
akshat0109/kisan_backend
|
b26fb382fa5377010be60e6edd5e57b3e6c5b3fe
|
[
"MIT"
] | null | null | null |
flasky2/venv/Lib/site-packages/weka/flow/sink.py
|
akshat0109/kisan_backend
|
b26fb382fa5377010be60e6edd5e57b3e6c5b3fe
|
[
"MIT"
] | null | null | null |
flasky2/venv/Lib/site-packages/weka/flow/sink.py
|
akshat0109/kisan_backend
|
b26fb382fa5377010be60e6edd5e57b3e6c5b3fe
|
[
"MIT"
] | null | null | null |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sink.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import traceback
import weka.core.serialization as serialization
from weka.core.dataset import Instances, Instance
from weka.flow.base import InputConsumer
from weka.flow.container import ModelContainer
from weka.classifiers import Evaluation
import weka.plot.classifiers as pltclassifier
import weka.plot.clusterers as pltclusterer
import weka.plot.dataset as pltdataset
class Sink(InputConsumer):
    """
    The ancestor for all sinks.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(Sink, self).__init__(name=name, config=config)
        # NOTE(review): this second call skips InputConsumer in the MRO and
        # re-runs the grandparent's __init__ on top of the chain started
        # above -- looks redundant or like a workaround; confirm upstream
        # before touching.
        super(InputConsumer, self).__init__(name=name, config=config)
    def post_execute(self):
        """
        Gets executed after the actual execution.
        On success, drops the stored input token so the sink does not keep
        a reference to already-consumed data.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        result = super(Sink, self).post_execute()
        if result is None:
            self._input = None
        return result
class Null(Sink):
    """
    Sink that just gobbles up all the data.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(Null, self).__init__(name=name, config=config)
    def do_execute(self):
        """
        The actual execution of the actor: silently discards the input.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        return None
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Sink that just gobbles up all the data."
class Console(Sink):
    """
    Sink that outputs the payloads of the data on stdout.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(Console, self).__init__(name=name, config=config)
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Sink that outputs the payloads of the data on stdout."
    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        return "prefix: '%s'" % str(self.config["prefix"])
    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(Console, self).fix_config(options)
        # setdefault only fills in missing keys, same as the original
        # "if opt not in ..." guards.
        options.setdefault("prefix", "")
        self.help.setdefault("prefix", "The prefix for the output (string).")
        return options
    def do_execute(self):
        """
        The actual execution of the actor: prints prefix + payload to stdout.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        prefix = self.resolve_option("prefix")
        print(prefix + str(self.input.payload))
        return None
class FileOutputSink(Sink):
    """
    Ancestor for sinks that output data to a file.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(FileOutputSink, self).__init__(name=name, config=config)
    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        return "output: '%s'" % str(self.config["output"])
    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(FileOutputSink, self).fix_config(options)
        # setdefault only fills in missing keys, same as the original
        # "if opt not in ..." guards.
        options.setdefault("output", ".")
        self.help.setdefault("output", "The file to write to (string).")
        return options
class DumpFile(FileOutputSink):
    """
    Sink that outputs the payloads of the data to a file.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(DumpFile, self).__init__(name=name, config=config)
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Sink that outputs the payloads of the data to a file."
    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        return super(DumpFile, self).quickinfo + ", append: " + str(self.config["append"])
    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(DumpFile, self).fix_config(options)
        opt = "append"
        if opt not in options:
            options[opt] = False
        if opt not in self.help:
            self.help[opt] = "Whether to append to the file or overwrite (bool)."
        return options
    def do_execute(self):
        """
        The actual execution of the actor: writes str(payload) plus a newline
        to the configured output file.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        result = None
        # Resolve the append/overwrite choice once, up front.
        mode = "a" if bool(self.resolve_option("append")) else "w"
        try:
            # 'with' guarantees the handle is closed even if a write raises,
            # replacing the manual f = None / finally / f.close() bookkeeping.
            with open(str(self.resolve_option("output")), mode) as f:
                f.write(str(self.input.payload))
                f.write("\n")
        except Exception:
            # Fixed Python-2-only 'except Exception, e' (a SyntaxError on
            # Python 3); the bound 'e' was unused anyway.
            result = self.full_name + "\n" + traceback.format_exc()
        return result
class ModelWriter(FileOutputSink):
    """
    Writes a model to disk.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(ModelWriter, self).__init__(name=name, config=config)
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Writes a model to disk."
    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, ModelContainer):
            return
        raise Exception(self.full_name + ": Input token is not a ModelContainer!")
    def do_execute(self):
        """
        The actual execution of the actor: serializes the model and the
        dataset header from the container to the configured output file.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        cont = self.input.payload
        objects = [cont.get("Model").jobject, cont.get("Header").jobject]
        serialization.write_all(str(self.resolve_option("output")), objects)
        return None
class MatrixPlot(Sink):
    """
    Displays the Instances object as matrix plot.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(MatrixPlot, self).__init__(name=name, config=config)
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Displays the Instances object as matrix plot."
    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(MatrixPlot, self).fix_config(options)
        # (option name, default value, help text) -- applied in order.
        defaults = [
            ("percent", 100.0, "The percentage of the data to display (0-100, float)."),
            ("seed", 1, "The seed value for randomizing the plot when viewing a subset (int)."),
            ("size", 10, "The size of the circles in the plot (int)."),
            ("title", None, "The title for the plot (str)."),
            ("outfile", None, "The file to store the plot in (str)."),
            ("wait", True, "Whether to wait for user to close the plot window (bool)."),
        ]
        for key, value, text in defaults:
            if key not in options:
                options[key] = value
            if key not in self.help:
                self.help[key] = text
        return options
    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        cfg = self.config
        return "percent: %s, title: %s, outfile: %s, wait: %s" % (
            str(cfg["percent"]), str(cfg["title"]), str(cfg["outfile"]), str(cfg["wait"]))
    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, Instances):
            return
        raise Exception(self.full_name + ": Input token is not an Instances object!")
    def do_execute(self):
        """
        The actual execution of the actor: renders the matrix plot from the
        incoming Instances payload.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        data = self.input.payload
        pltdataset.matrix_plot(
            data,
            percent=float(self.resolve_option("percent")),
            seed=int(self.resolve_option("seed")),
            size=int(self.resolve_option("size")),
            title=self.resolve_option("title"),
            outfile=self.resolve_option("outfile"),
            wait=bool(self.resolve_option("wait")))
        return None
class LinePlot(Sink):
    """
    Displays the Instances object as line plot using the internal format, one line per Instance.
    """
    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(LinePlot, self).__init__(name=name, config=config)
    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Displays the Instances object as line plot using the internal format, one line per Instance."
    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(LinePlot, self).fix_config(options)
        # (option name, default value, help text) -- applied in order.
        defaults = [
            ("attributes", None, "The list of 0-based attribute indices to print; None for all (int)."),
            ("percent", 100.0, "The percentage of the data to display (0-100, float)."),
            ("seed", 1, "The seed value for randomizing the plot when viewing a subset (int)."),
            ("title", None, "The title for the plot (str)."),
            ("outfile", None, "The file to store the plot in (str)."),
            ("wait", True, "Whether to wait for user to close the plot window (bool)."),
        ]
        for key, value, text in defaults:
            if key not in options:
                options[key] = value
            if key not in self.help:
                self.help[key] = text
        return options
    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        cfg = self.config
        return "percent: %s, title: %s, outfile: %s, wait: %s" % (
            str(cfg["percent"]), str(cfg["title"]), str(cfg["outfile"]), str(cfg["wait"]))
    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, Instances):
            return
        raise Exception(self.full_name + ": Input token is not an Instances object!")
    def do_execute(self):
        """
        The actual execution of the actor: renders the line plot from the
        incoming Instances payload.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        data = self.input.payload
        pltdataset.line_plot(
            data,
            atts=self.resolve_option("attributes"),
            percent=float(self.resolve_option("percent")),
            seed=int(self.resolve_option("seed")),
            title=self.resolve_option("title"),
            outfile=self.resolve_option("outfile"),
            wait=bool(self.resolve_option("wait")))
        return None
class ClassifierErrors(Sink):
    """
    Displays the errors obtained through a classifier evaluation.
    """

    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(ClassifierErrors, self).__init__(name=name, config=config)

    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Displays the errors obtained through a classifier evaluation."

    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(ClassifierErrors, self).fix_config(options)
        # (option name, default value, help text) for every option this sink adds
        defaults = [
            ("absolute", True,
             "Whether to use absolute errors as size or relative ones (bool)."),
            ("max_relative_size", 50,
             "The maximum size in point in case of relative mode (int)."),
            ("absolute_size", 50,
             "The size in point in case of absolute mode (int)."),
            ("title", None,
             "The title for the plot (str)."),
            ("outfile", None,
             "The file to store the plot in (str)."),
            ("wait", True,
             "Whether to wait for user to close the plot window (bool)."),
        ]
        for opt, default, helptext in defaults:
            if opt not in options:
                options[opt] = default
            if opt not in self.help:
                self.help[opt] = helptext
        return options

    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        keys = ["absolute", "title", "outfile", "wait"]
        return ", ".join(k + ": " + str(self.config[k]) for k in keys)

    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, Evaluation):
            return
        raise Exception(self.full_name + ": Input token is not an Evaluation object!")

    def do_execute(self):
        """
        The actual execution of the actor.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        evl = self.input.payload
        params = {
            "absolute": bool(self.resolve_option("absolute")),
            "max_relative_size": int(self.resolve_option("max_relative_size")),
            "absolute_size": int(self.resolve_option("absolute_size")),
            "title": self.resolve_option("title"),
            "outfile": self.resolve_option("outfile"),
            "wait": bool(self.resolve_option("wait")),
        }
        pltclassifier.plot_classifier_errors(evl.predictions, **params)
        return None
class ROC(Sink):
    """
    Displays the ROC curve obtained from a classifier evaluation.
    """

    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(ROC, self).__init__(name=name, config=config)

    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Displays the ROC curve obtained from a classifier evaluation."

    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(ROC, self).fix_config(options)
        # (option name, default value, help text) for every option this sink adds
        defaults = [
            ("class_index", [0],
             "The list of 0-based class-label indices to display (list)."),
            ("key_loc", "lower right",
             "The location of the key in the plot (str)."),
            ("title", None,
             "The title for the plot (str)."),
            ("outfile", None,
             "The file to store the plot in (str)."),
            ("wait", True,
             "Whether to wait for user to close the plot window (bool)."),
        ]
        for opt, default, helptext in defaults:
            if opt not in options:
                options[opt] = default
            if opt not in self.help:
                self.help[opt] = helptext
        return options

    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        pairs = [("classes", "class_index"), ("title", "title"),
                 ("outfile", "outfile"), ("wait", "wait")]
        return ", ".join(label + ": " + str(self.config[key]) for label, key in pairs)

    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, Evaluation):
            return
        raise Exception(self.full_name + ": Input token is not an Evaluation object!")

    def do_execute(self):
        """
        The actual execution of the actor.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        evl = self.input.payload
        params = {
            "class_index": self.resolve_option("class_index"),
            "title": self.resolve_option("title"),
            "key_loc": self.resolve_option("key_loc"),
            "outfile": self.resolve_option("outfile"),
            "wait": bool(self.resolve_option("wait")),
        }
        pltclassifier.plot_roc(evl, **params)
        return None
class PRC(Sink):
    """
    Displays the PRC curve obtained from a classifier evaluation.
    """

    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(PRC, self).__init__(name=name, config=config)

    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Displays the PRC curve obtained from a classifier evaluation."

    def fix_config(self, options):
        """
        Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
        :param options: the options to fix
        :type options: dict
        :return: the (potentially) fixed options
        :rtype: dict
        """
        options = super(PRC, self).fix_config(options)
        # (option name, default value, help text) for every option this sink adds
        defaults = [
            ("class_index", [0],
             "The list of 0-based class-label indices to display (list)."),
            ("key_loc", "lower center",
             "The location of the key in the plot (str)."),
            ("title", None,
             "The title for the plot (str)."),
            ("outfile", None,
             "The file to store the plot in (str)."),
            ("wait", True,
             "Whether to wait for user to close the plot window (bool)."),
        ]
        for opt, default, helptext in defaults:
            if opt not in options:
                options[opt] = default
            if opt not in self.help:
                self.help[opt] = helptext
        return options

    @property
    def quickinfo(self):
        """
        Returns a short string describing some of the options of the actor.
        :return: the info, None if not available
        :rtype: str
        """
        pairs = [("classes", "class_index"), ("title", "title"),
                 ("outfile", "outfile"), ("wait", "wait")]
        return ", ".join(label + ": " + str(self.config[key]) for label, key in pairs)

    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if isinstance(token.payload, Evaluation):
            return
        raise Exception(self.full_name + ": Input token is not an Evaluation object!")

    def do_execute(self):
        """
        The actual execution of the actor.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        evl = self.input.payload
        params = {
            "class_index": self.resolve_option("class_index"),
            "title": self.resolve_option("title"),
            "key_loc": self.resolve_option("key_loc"),
            "outfile": self.resolve_option("outfile"),
            "wait": bool(self.resolve_option("wait")),
        }
        pltclassifier.plot_prc(evl, **params)
        return None
class InstanceDumper(FileOutputSink):
    """
    Sink that dumps the incoming Instance/Instances into a file.
    """

    def __init__(self, name=None, config=None):
        """
        Initializes the sink.
        :param name: the name of the sink
        :type name: str
        :param config: the dictionary with the options (str -> object).
        :type config: dict
        """
        super(InstanceDumper, self).__init__(name=name, config=config)
        # header of the last dataset written; used to decide append vs overwrite
        self._header = None

    def description(self):
        """
        Returns a description of the actor.
        :return: the description
        :rtype: str
        """
        return "Sink that dumps the incoming Instance/Instances objects in a file."

    def check_input(self, token):
        """
        Performs checks on the input token. Raises an exception if unsupported.
        :param token: the token to check
        :type token: Token
        """
        if not isinstance(token.payload, (Instance, Instances)):
            raise Exception(self.full_name + ": Input token is neither an Instance nor Instances object!")

    def do_execute(self):
        """
        The actual execution of the actor.
        :return: None if successful, otherwise error message
        :rtype: str
        """
        # FIX: consistently use the public 'input' property (the original mixed
        # self.input.payload and self._input.payload for the same value)
        payload = self.input.payload
        if isinstance(payload, Instance):
            inst = payload
            data = inst.dataset
        else:
            inst = None
            data = payload
        # a new/changed header means we start a fresh file with the full dataset
        append = True
        if self._header is None or (self._header.equal_headers(data) is not None):
            self._header = Instances.template_instances(data, 0)
            outstr = str(data)
            append = False
        elif inst is not None:
            outstr = str(inst)
        else:
            outstr = str(data)
        try:
            mode = "a" if append else "w"
            # context manager guarantees the file is closed even on write errors
            with open(str(self.resolve_option("output")), mode) as f:
                f.write(outstr)
                f.write("\n")
        except Exception:
            # report the full traceback as the actor's error message
            return self.full_name + "\n" + traceback.format_exc()
        return None
| 30.198171
| 109
| 0.570082
| 3,588
| 29,715
| 4.660256
| 0.080825
| 0.01854
| 0.029663
| 0.037079
| 0.842234
| 0.826745
| 0.813707
| 0.805813
| 0.798816
| 0.789965
| 0
| 0.00176
| 0.330877
| 29,715
| 983
| 110
| 30.228891
| 0.839209
| 0.022951
| 0
| 0.724537
| 0
| 0
| 0.169832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.020833
| null | null | 0.00463
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a0882244036a99359abc60506107aa145ee78134
| 26,815
|
py
|
Python
|
com/vmware/nsx/app_discovery/sessions_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/app_discovery/sessions_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/app_discovery/sessions_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.app_discovery.sessions.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class AppProfiles(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.app_discovery.sessions.app_profiles'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _AppProfilesStub)

    def list(self,
             session_id,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Returns the application profiles that were part of the application
        discovery session when it was started.

        :type  session_id: :class:`str`
        :param session_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records
            (supplied by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be
            included in query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page
            (server may return fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx.model_client.AppProfileListResult`
        :return: com.vmware.nsx.model.AppProfileListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # gather the operation arguments, then dispatch through the stub
        call_args = {
            'session_id': session_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)
class InstalledApps(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.app_discovery.sessions.installed_apps'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _InstalledAppsStub)

    def list(self,
             session_id,
             app_profile_id=None,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             vm_id=None,
             ):
        """
        Returns the details of the installed apps for the app profile ID in
        that session.

        :type  session_id: :class:`str`
        :param session_id: (required)
        :type  app_profile_id: :class:`str` or ``None``
        :param app_profile_id: (optional)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records
            (supplied by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be
            included in query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page
            (server may return fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :type  vm_id: :class:`str` or ``None``
        :param vm_id: (optional)
        :rtype: :class:`com.vmware.nsx.model_client.AppInfoListResult`
        :return: com.vmware.nsx.model.AppInfoListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # gather the operation arguments, then dispatch through the stub
        call_args = {
            'session_id': session_id,
            'app_profile_id': app_profile_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
            'vm_id': vm_id,
        }
        return self._invoke('list', call_args)
class NsGroups(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.app_discovery.sessions.ns_groups'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _NsGroupsStub)

    def list(self,
             session_id,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Returns the ns groups that were part of the application discovery
        session when it was started.

        :type  session_id: :class:`str`
        :param session_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records
            (supplied by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be
            included in query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page
            (server may return fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx.model_client.NSGroupMetaInfoListResult`
        :return: com.vmware.nsx.model.NSGroupMetaInfoListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # gather the operation arguments, then dispatch through the stub
        call_args = {
            'session_id': session_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)
class ReClassify(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.app_discovery.sessions.re_classify'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ReClassifyStub)

    def create(self,
               session_id,
               session_reclassification_parameter,
               ):
        """
        Re-classify a completed application discovery session against the
        supplied AppProfiles. When no AppProfiles are specified, the previous
        AppProfiles of that session are used.

        :type  session_id: :class:`str`
        :param session_id: (required)
        :type  session_reclassification_parameter: :class:`com.vmware.nsx.model_client.SessionReclassificationParameter`
        :param session_reclassification_parameter: (required)
        :rtype: :class:`com.vmware.nsx.model_client.AppDiscoverySessionResultSummary`
        :return: com.vmware.nsx.model.AppDiscoverySessionResultSummary
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # gather the operation arguments, then dispatch through the stub
        call_args = {
            'session_id': session_id,
            'session_reclassification_parameter': session_reclassification_parameter,
        }
        return self._invoke('create', call_args)
class Summary(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.app_discovery.sessions.summary'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _SummaryStub)

    def get(self,
            session_id,
            ):
        """
        Returns the summary of the application discovery session.

        :type  session_id: :class:`str`
        :param session_id: (required)
        :rtype: :class:`com.vmware.nsx.model_client.AppDiscoverySessionResultSummary`
        :return: com.vmware.nsx.model.AppDiscoverySessionResultSummary
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # single-argument operation; dispatch through the stub
        return self._invoke('get', {'session_id': session_id})
class _AppProfilesStub(ApiInterfaceStub):
    def __init__(self, config):
        # ---- metadata for the 'list' operation ----
        list_input_type = type.StructType('operation-input', {
            'session_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        # standard vAPI error set, built from (error-id suffix, binding class) pairs
        list_error_dict = dict(
            ('com.vmware.vapi.std.errors.' + suffix,
             type.ReferenceType('com.vmware.vapi.std.errors_client', cls))
            for suffix, cls in [
                ('service_unavailable', 'ServiceUnavailable'),
                ('invalid_request', 'InvalidRequest'),
                ('internal_server_error', 'InternalServerError'),
                ('unauthorized', 'Unauthorized'),
                ('not_found', 'NotFound'),
            ])
        list_input_value_validator_list = []
        list_output_validator_list = []
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/app-discovery/sessions/{session-id}/app-profiles',
            path_variables={'session_id': 'session-id'},
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        operations = {
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AppProfileListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.app_discovery.sessions.app_profiles',
            config=config, operations=operations,
            rest_metadata={'list': list_rest_metadata},
            is_vapi_rest=False)
class _InstalledAppsStub(ApiInterfaceStub):
    def __init__(self, config):
        # ---- metadata for the 'list' operation ----
        list_input_type = type.StructType('operation-input', {
            'session_id': type.StringType(),
            'app_profile_id': type.OptionalType(type.StringType()),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
            'vm_id': type.OptionalType(type.StringType()),
        })
        # standard vAPI error set, built from (error-id suffix, binding class) pairs
        list_error_dict = dict(
            ('com.vmware.vapi.std.errors.' + suffix,
             type.ReferenceType('com.vmware.vapi.std.errors_client', cls))
            for suffix, cls in [
                ('service_unavailable', 'ServiceUnavailable'),
                ('invalid_request', 'InvalidRequest'),
                ('internal_server_error', 'InternalServerError'),
                ('unauthorized', 'Unauthorized'),
                ('not_found', 'NotFound'),
            ])
        list_input_value_validator_list = []
        list_output_validator_list = []
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/app-discovery/sessions/{session-id}/installed-apps',
            path_variables={'session_id': 'session-id'},
            query_parameters={
                'app_profile_id': 'app_profile_id',
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
                'vm_id': 'vm_id',
            },
            content_type='application/json'
        )
        operations = {
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AppInfoListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.app_discovery.sessions.installed_apps',
            config=config, operations=operations,
            rest_metadata={'list': list_rest_metadata},
            is_vapi_rest=False)
class _NsGroupsStub(ApiInterfaceStub):
    def __init__(self, config):
        # ---- metadata for the 'list' operation ----
        list_input_type = type.StructType('operation-input', {
            'session_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        # standard vAPI error set, built from (error-id suffix, binding class) pairs
        list_error_dict = dict(
            ('com.vmware.vapi.std.errors.' + suffix,
             type.ReferenceType('com.vmware.vapi.std.errors_client', cls))
            for suffix, cls in [
                ('service_unavailable', 'ServiceUnavailable'),
                ('invalid_request', 'InvalidRequest'),
                ('internal_server_error', 'InternalServerError'),
                ('unauthorized', 'Unauthorized'),
                ('not_found', 'NotFound'),
            ])
        list_input_value_validator_list = []
        list_output_validator_list = []
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/app-discovery/sessions/{session-id}/ns-groups',
            path_variables={'session_id': 'session-id'},
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        operations = {
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NSGroupMetaInfoListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.app_discovery.sessions.ns_groups',
            config=config, operations=operations,
            rest_metadata={'list': list_rest_metadata},
            is_vapi_rest=False)
class _ReClassifyStub(ApiInterfaceStub):
    def __init__(self, config):
        # ---- metadata for the 'create' operation ----
        create_input_type = type.StructType('operation-input', {
            'session_id': type.StringType(),
            'session_reclassification_parameter': type.ReferenceType('com.vmware.nsx.model_client', 'SessionReclassificationParameter'),
        })
        # standard vAPI error set, built from (error-id suffix, binding class) pairs
        create_error_dict = dict(
            ('com.vmware.vapi.std.errors.' + suffix,
             type.ReferenceType('com.vmware.vapi.std.errors_client', cls))
            for suffix, cls in [
                ('service_unavailable', 'ServiceUnavailable'),
                ('invalid_request', 'InvalidRequest'),
                ('internal_server_error', 'InternalServerError'),
                ('unauthorized', 'Unauthorized'),
                ('not_found', 'NotFound'),
            ])
        create_input_value_validator_list = []
        create_output_validator_list = []
        create_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/app-discovery/sessions/{session-id}/re-classify',
            request_body_parameter='session_reclassification_parameter',
            path_variables={'session_id': 'session-id'},
            query_parameters={},
            content_type='application/json'
        )
        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AppDiscoverySessionResultSummary'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.app_discovery.sessions.re_classify',
            config=config, operations=operations,
            rest_metadata={'create': create_rest_metadata},
            is_vapi_rest=False)
class _SummaryStub(ApiInterfaceStub):
    def __init__(self, config):
        # ---- metadata for the 'get' operation ----
        get_input_type = type.StructType('operation-input', {
            'session_id': type.StringType(),
        })
        # standard vAPI error set, built from (error-id suffix, binding class) pairs
        get_error_dict = dict(
            ('com.vmware.vapi.std.errors.' + suffix,
             type.ReferenceType('com.vmware.vapi.std.errors_client', cls))
            for suffix, cls in [
                ('service_unavailable', 'ServiceUnavailable'),
                ('invalid_request', 'InvalidRequest'),
                ('internal_server_error', 'InternalServerError'),
                ('unauthorized', 'Unauthorized'),
                ('not_found', 'NotFound'),
            ])
        get_input_value_validator_list = []
        get_output_validator_list = []
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/app-discovery/sessions/{session-id}/summary',
            path_variables={'session_id': 'session-id'},
            query_parameters={},
            content_type='application/json'
        )
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AppDiscoverySessionResultSummary'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.app_discovery.sessions.summary',
            config=config, operations=operations,
            rest_metadata={'get': get_rest_metadata},
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    # Maps attribute names to the service classes (or dotted child
    # stub-factory paths) that this package exposes.
    _attrs = dict(
        AppProfiles=AppProfiles,
        InstalledApps=InstalledApps,
        NsGroups=NsGroups,
        ReClassify=ReClassify,
        Summary=Summary,
        ns_groups='com.vmware.nsx.app_discovery.sessions.ns_groups_client.StubFactory',
    )
| 42.095761
| 136
| 0.596494
| 2,653
| 26,815
| 5.792688
| 0.082925
| 0.060906
| 0.063444
| 0.078084
| 0.881767
| 0.861661
| 0.83329
| 0.818324
| 0.804594
| 0.795029
| 0
| 0.001159
| 0.292336
| 26,815
| 636
| 137
| 42.16195
| 0.808706
| 0.268842
| 0
| 0.626598
| 1
| 0
| 0.284188
| 0.184989
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038363
| false
| 0
| 0.030691
| 0
| 0.12532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a0b967ba667456e32e7ff4e43e7fa76889a809a7
| 36,728
|
py
|
Python
|
pybind/nos/v7_1_0/monitor/session/span_command/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/monitor/session/span_command/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/monitor/session/span_command/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class span_command(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-span - based on the path /monitor/session/span-command. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__source','__src_tengigabitethernet','__src_tengigabitethernet_val','__destination','__dest_tengigabitethernet','__dest_tengigabitethernet_val','__dest_vlan_val','__direction',)
_yang_name = 'span-command'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__direction = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'both': {'value': 2}, u'rx': {'value': 1}, u'tx': {'value': 0}},), is_leaf=True, yang_name="direction", rest_name="direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Mirror Direction:Ingress or Egress or Both', u'cli-optional-in-sequence': None, u'display-when': u"(((../dest-tengigabitethernet = 'gigabitethernet') or\n(../dest-tengigabitethernet = 'tengigabitethernet') or\n(../dest-tengigabitethernet = 'fortygigabitethernet') or\n(../dest-tengigabitethernet = 'rspan-vlan') or \n (../dest-tengigabitethernet = 'hundredgigabitethernet')) and \n(../source))"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
self.__dest_tengigabitethernet_val = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="dest-tengigabitethernet-val", rest_name="dest-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'gigabitethernet' or\n../dest-tengigabitethernet = 'tengigabitethernet' or\n../dest-tengigabitethernet = 'fortygigabitethernet' or\n../dest-tengigabitethernet = 'hundredgigabitethernet'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
self.__dest_vlan_val = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="dest-vlan-val", rest_name="dest-vlan-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'rspan-vlan'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='interface:vlan-type', is_config=True)
self.__destination = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'destination': {'value': 0}},), is_leaf=True, yang_name="destination", rest_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
self.__src_tengigabitethernet = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="src-tengigabitethernet", rest_name="src-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
self.__source = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'source': {'value': 0}},), is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
self.__dest_tengigabitethernet = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'rspan-vlan': {'value': 4}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="dest-tengigabitethernet", rest_name="dest-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
self.__src_tengigabitethernet_val = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="src-tengigabitethernet-val", rest_name="src-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'monitor', u'session', u'span-command']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'monitor', u'session']
def _get_source(self):
"""
Getter method for source, mapped from YANG variable /monitor/session/span_command/source (enumeration)
"""
return self.__source
def _set_source(self, v, load=False):
"""
Setter method for source, mapped from YANG variable /monitor/session/span_command/source (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_source is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'source': {'value': 0}},), is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source must be of a type compatible with enumeration""",
'defined-type': "brocade-span:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'source': {'value': 0}},), is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)""",
})
self.__source = t
if hasattr(self, '_set'):
self._set()
def _unset_source(self):
self.__source = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'source': {'value': 0}},), is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
def _get_src_tengigabitethernet(self):
"""
Getter method for src_tengigabitethernet, mapped from YANG variable /monitor/session/span_command/src_tengigabitethernet (enumeration)
"""
return self.__src_tengigabitethernet
def _set_src_tengigabitethernet(self, v, load=False):
"""
Setter method for src_tengigabitethernet, mapped from YANG variable /monitor/session/span_command/src_tengigabitethernet (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_tengigabitethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_tengigabitethernet() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="src-tengigabitethernet", rest_name="src-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """src_tengigabitethernet must be of a type compatible with enumeration""",
'defined-type': "brocade-span:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="src-tengigabitethernet", rest_name="src-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)""",
})
self.__src_tengigabitethernet = t
if hasattr(self, '_set'):
self._set()
def _unset_src_tengigabitethernet(self):
self.__src_tengigabitethernet = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="src-tengigabitethernet", rest_name="src-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
def _get_src_tengigabitethernet_val(self):
"""
Getter method for src_tengigabitethernet_val, mapped from YANG variable /monitor/session/span_command/src_tengigabitethernet_val (span-if-type)
"""
return self.__src_tengigabitethernet_val
def _set_src_tengigabitethernet_val(self, v, load=False):
"""
Setter method for src_tengigabitethernet_val, mapped from YANG variable /monitor/session/span_command/src_tengigabitethernet_val (span-if-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_tengigabitethernet_val is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_tengigabitethernet_val() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="src-tengigabitethernet-val", rest_name="src-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """src_tengigabitethernet_val must be of a type compatible with span-if-type""",
'defined-type': "brocade-span:span-if-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="src-tengigabitethernet-val", rest_name="src-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)""",
})
self.__src_tengigabitethernet_val = t
if hasattr(self, '_set'):
self._set()
def _unset_src_tengigabitethernet_val(self):
self.__src_tengigabitethernet_val = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="src-tengigabitethernet-val", rest_name="src-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'(../source)', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
def _get_destination(self):
"""
Getter method for destination, mapped from YANG variable /monitor/session/span_command/destination (enumeration)
"""
return self.__destination
def _set_destination(self, v, load=False):
"""
Setter method for destination, mapped from YANG variable /monitor/session/span_command/destination (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'destination': {'value': 0}},), is_leaf=True, yang_name="destination", rest_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destination must be of a type compatible with enumeration""",
'defined-type': "brocade-span:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'destination': {'value': 0}},), is_leaf=True, yang_name="destination", rest_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)""",
})
self.__destination = t
if hasattr(self, '_set'):
self._set()
def _unset_destination(self):
self.__destination = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'destination': {'value': 0}},), is_leaf=True, yang_name="destination", rest_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
def _get_dest_tengigabitethernet(self):
"""
Getter method for dest_tengigabitethernet, mapped from YANG variable /monitor/session/span_command/dest_tengigabitethernet (enumeration)
"""
return self.__dest_tengigabitethernet
def _set_dest_tengigabitethernet(self, v, load=False):
"""
Setter method for dest_tengigabitethernet, mapped from YANG variable /monitor/session/span_command/dest_tengigabitethernet (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_dest_tengigabitethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dest_tengigabitethernet() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'rspan-vlan': {'value': 4}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="dest-tengigabitethernet", rest_name="dest-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dest_tengigabitethernet must be of a type compatible with enumeration""",
'defined-type': "brocade-span:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'rspan-vlan': {'value': 4}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="dest-tengigabitethernet", rest_name="dest-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)""",
})
self.__dest_tengigabitethernet = t
if hasattr(self, '_set'):
self._set()
def _unset_dest_tengigabitethernet(self):
self.__dest_tengigabitethernet = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'rspan-vlan': {'value': 4}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 3}},), is_leaf=True, yang_name="dest-tengigabitethernet", rest_name="dest-tengigabitethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
def _get_dest_tengigabitethernet_val(self):
"""
Getter method for dest_tengigabitethernet_val, mapped from YANG variable /monitor/session/span_command/dest_tengigabitethernet_val (span-if-type)
"""
return self.__dest_tengigabitethernet_val
def _set_dest_tengigabitethernet_val(self, v, load=False):
"""
Setter method for dest_tengigabitethernet_val, mapped from YANG variable /monitor/session/span_command/dest_tengigabitethernet_val (span-if-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_dest_tengigabitethernet_val is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dest_tengigabitethernet_val() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="dest-tengigabitethernet-val", rest_name="dest-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'gigabitethernet' or\n../dest-tengigabitethernet = 'tengigabitethernet' or\n../dest-tengigabitethernet = 'fortygigabitethernet' or\n../dest-tengigabitethernet = 'hundredgigabitethernet'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dest_tengigabitethernet_val must be of a type compatible with span-if-type""",
'defined-type': "brocade-span:span-if-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="dest-tengigabitethernet-val", rest_name="dest-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'gigabitethernet' or\n../dest-tengigabitethernet = 'tengigabitethernet' or\n../dest-tengigabitethernet = 'fortygigabitethernet' or\n../dest-tengigabitethernet = 'hundredgigabitethernet'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)""",
})
self.__dest_tengigabitethernet_val = t
if hasattr(self, '_set'):
self._set()
def _unset_dest_tengigabitethernet_val(self):
self.__dest_tengigabitethernet_val = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..16']}), is_leaf=True, yang_name="dest-tengigabitethernet-val", rest_name="dest-tengigabitethernet-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'gigabitethernet' or\n../dest-tengigabitethernet = 'tengigabitethernet' or\n../dest-tengigabitethernet = 'fortygigabitethernet' or\n../dest-tengigabitethernet = 'hundredgigabitethernet'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='span-if-type', is_config=True)
def _get_dest_vlan_val(self):
"""
Getter method for dest_vlan_val, mapped from YANG variable /monitor/session/span_command/dest_vlan_val (interface:vlan-type)
"""
return self.__dest_vlan_val
def _set_dest_vlan_val(self, v, load=False):
"""
Setter method for dest_vlan_val, mapped from YANG variable /monitor/session/span_command/dest_vlan_val (interface:vlan-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_dest_vlan_val is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dest_vlan_val() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="dest-vlan-val", rest_name="dest-vlan-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'rspan-vlan'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='interface:vlan-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dest_vlan_val must be of a type compatible with interface:vlan-type""",
'defined-type': "interface:vlan-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="dest-vlan-val", rest_name="dest-vlan-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'rspan-vlan'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='interface:vlan-type', is_config=True)""",
})
self.__dest_vlan_val = t
if hasattr(self, '_set'):
self._set()
def _unset_dest_vlan_val(self):
self.__dest_vlan_val = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="dest-vlan-val", rest_name="dest-vlan-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u"../dest-tengigabitethernet = 'rspan-vlan'"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='interface:vlan-type', is_config=True)
def _get_direction(self):
"""
Getter method for direction, mapped from YANG variable /monitor/session/span_command/direction (enumeration)
"""
return self.__direction
def _set_direction(self, v, load=False):
"""
Setter method for direction, mapped from YANG variable /monitor/session/span_command/direction (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_direction is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_direction() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'both': {'value': 2}, u'rx': {'value': 1}, u'tx': {'value': 0}},), is_leaf=True, yang_name="direction", rest_name="direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Mirror Direction:Ingress or Egress or Both', u'cli-optional-in-sequence': None, u'display-when': u"(((../dest-tengigabitethernet = 'gigabitethernet') or\n(../dest-tengigabitethernet = 'tengigabitethernet') or\n(../dest-tengigabitethernet = 'fortygigabitethernet') or\n(../dest-tengigabitethernet = 'rspan-vlan') or \n (../dest-tengigabitethernet = 'hundredgigabitethernet')) and \n(../source))"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """direction must be of a type compatible with enumeration""",
'defined-type': "brocade-span:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'both': {'value': 2}, u'rx': {'value': 1}, u'tx': {'value': 0}},), is_leaf=True, yang_name="direction", rest_name="direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Mirror Direction:Ingress or Egress or Both', u'cli-optional-in-sequence': None, u'display-when': u"(((../dest-tengigabitethernet = 'gigabitethernet') or\n(../dest-tengigabitethernet = 'tengigabitethernet') or\n(../dest-tengigabitethernet = 'fortygigabitethernet') or\n(../dest-tengigabitethernet = 'rspan-vlan') or \n (../dest-tengigabitethernet = 'hundredgigabitethernet')) and \n(../source))"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)""",
})
self.__direction = t
if hasattr(self, '_set'):
self._set()
def _unset_direction(self):
  # Reset the 'direction' leaf to an unconfigured state by rebuilding its
  # YANGDynClass wrapper from scratch (an enumeration restricted to the
  # keys 'tx' / 'rx' / 'both'), discarding any previously set value.
  # NOTE: this block is auto-generated by pyangbind from the
  # 'brocade-span' YANG module — do not hand-edit the wrapper arguments.
  self.__direction = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'both': {'value': 2}, u'rx': {'value': 1}, u'tx': {'value': 0}},), is_leaf=True, yang_name="direction", rest_name="direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Mirror Direction:Ingress or Egress or Both', u'cli-optional-in-sequence': None, u'display-when': u"(((../dest-tengigabitethernet = 'gigabitethernet') or\n(../dest-tengigabitethernet = 'tengigabitethernet') or\n(../dest-tengigabitethernet = 'fortygigabitethernet') or\n(../dest-tengigabitethernet = 'rspan-vlan') or \n (../dest-tengigabitethernet = 'hundredgigabitethernet')) and \n(../source))"}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='enumeration', is_config=True)
# Expose each generated _get_*/_set_* accessor pair as a plain Python
# property, so attribute access on instances routes through the pyangbind
# validation logic above. __builtin__.property is used explicitly because
# pyangbind-generated modules may shadow the 'property' name.
source = __builtin__.property(_get_source, _set_source)
src_tengigabitethernet = __builtin__.property(_get_src_tengigabitethernet, _set_src_tengigabitethernet)
src_tengigabitethernet_val = __builtin__.property(_get_src_tengigabitethernet_val, _set_src_tengigabitethernet_val)
destination = __builtin__.property(_get_destination, _set_destination)
dest_tengigabitethernet = __builtin__.property(_get_dest_tengigabitethernet, _set_dest_tengigabitethernet)
dest_tengigabitethernet_val = __builtin__.property(_get_dest_tengigabitethernet_val, _set_dest_tengigabitethernet_val)
dest_vlan_val = __builtin__.property(_get_dest_vlan_val, _set_dest_vlan_val)
direction = __builtin__.property(_get_direction, _set_direction)
# Registry mapping each YANG-modelled child element name to its property;
# pyangbind's serialisation / iteration helpers consume this dict.
_pyangbind_elements = {'source': source, 'src_tengigabitethernet': src_tengigabitethernet, 'src_tengigabitethernet_val': src_tengigabitethernet_val, 'destination': destination, 'dest_tengigabitethernet': dest_tengigabitethernet, 'dest_tengigabitethernet_val': dest_tengigabitethernet_val, 'dest_vlan_val': dest_vlan_val, 'direction': direction, }
| 99.804348
| 1,004
| 0.714223
| 4,579
| 36,728
| 5.508845
| 0.045206
| 0.083726
| 0.03774
| 0.039326
| 0.907512
| 0.878771
| 0.862676
| 0.859386
| 0.855778
| 0.846224
| 0
| 0.006798
| 0.142915
| 36,728
| 367
| 1,005
| 100.076294
| 0.79453
| 0.115253
| 0
| 0.469298
| 0
| 0.061404
| 0.441849
| 0.235159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.035088
| 0
| 0.27193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
264eb499c72e7bcbba58cd6e3b0dcfe130eb67ce
| 116,246
|
py
|
Python
|
chars.py
|
idootop/qz_killer
|
ae05da9cec9aab1bfcd6a86f527442ee38f0b40a
|
[
"MIT"
] | 4
|
2019-12-23T07:13:33.000Z
|
2021-02-21T08:44:40.000Z
|
chars.py
|
idootop/qz_killer
|
ae05da9cec9aab1bfcd6a86f527442ee38f0b40a
|
[
"MIT"
] | null | null | null |
chars.py
|
idootop/qz_killer
|
ae05da9cec9aab1bfcd6a86f527442ee38f0b40a
|
[
"MIT"
] | null | null | null |
chars = {
'1':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
'1':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
'1':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'1':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'1':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
'2':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'2':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'2':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'3':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'3':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'3':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'3':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'4':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'4':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'4':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'4':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'4':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'5':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'5':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'6':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'7':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'7':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'8':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'8':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'8':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'9':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'9':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'9':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'9':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'9':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'a':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'a':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'a':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'a':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
'b':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'b':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'b':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'c':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'd':[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'd':[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'e':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'f':[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'f':[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'g':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'g':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'h':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'h':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'h':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'i':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'i':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'i':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'j':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'j':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'k':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'k':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'l':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'l':[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'm':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'n':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'p':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'p':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'q':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
'q':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
'q':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
'r':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'r':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
's':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
't':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
't':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
't':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'u':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'v':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'v':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'w':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
'w':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'x':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'y':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'y':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'y':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
'y':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'z':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'z':[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
}
| 1,263.543478
| 1,305
| 0.331555
| 38,538
| 116,246
| 1.000104
| 0.000934
| 1.467231
| 2.03334
| 2.495356
| 0.999689
| 0.999689
| 0.999689
| 0.999689
| 0.999689
| 0.999689
| 0
| 0.496888
| 0.333844
| 116,246
| 91
| 1,306
| 1,277.428571
| 0.000826
| 0
| 0
| 0
| 0
| 0
| 0.000766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
cd143c21716d1b02c861cacac4877aa6df7731e2
| 810
|
py
|
Python
|
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P06_OperationsBetweenNumbers.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P06_OperationsBetweenNumbers.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P06_OperationsBetweenNumbers.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1
|
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
# Read the two operands and the operator from standard input, then print
# the result of applying the operator, annotated with parity for +, -, *.
first = int(input())
second = int(input())
op = input()

if op in ("+", "-", "*"):
    # Arithmetic operators share one output format; dispatch via a table.
    outcome = {"+": first + second, "-": first - second, "*": first * second}[op]
    parity = "even" if outcome % 2 == 0 else "odd"
    print(f'{first} {op} {second} = {outcome} - {parity}')
elif op in ("/", "%") and second == 0:
    # Division and modulo are undefined for a zero divisor.
    print(f'Cannot divide {first} by zero')
elif op == "/":
    # True division is reported with two decimal places.
    print(f'{first} / {second} = {first / second:.2f}')
elif op == "%":
    print(f'{first} % {second} = {first % second}')
| 36.818182
| 54
| 0.45679
| 123
| 810
| 3.00813
| 0.162602
| 0.237838
| 0.172973
| 0.216216
| 0.786486
| 0.786486
| 0.786486
| 0.786486
| 0.645946
| 0.645946
| 0
| 0.103333
| 0.259259
| 810
| 22
| 55
| 36.818182
| 0.513333
| 0
| 0
| 0
| 0
| 0
| 0.324291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
26ad56b7d11869adeaf2b4463b22dd898dd0c475
| 35
|
py
|
Python
|
lib/python3.4/site-packages/flask/testsuite/test_apps/flaskext/oldext_package/submodule.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 21,684
|
2015-01-01T03:42:20.000Z
|
2022-03-30T13:32:44.000Z
|
lib/python3.4/site-packages/flask/testsuite/test_apps/flaskext/oldext_package/submodule.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 4,067
|
2015-01-01T00:04:51.000Z
|
2022-03-30T13:42:56.000Z
|
lib/python3.4/site-packages/flask/testsuite/test_apps/flaskext/oldext_package/submodule.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 1,901
|
2015-01-01T21:05:59.000Z
|
2022-03-21T08:14:25.000Z
|
def test_function():
    """Return the constant sentinel value 42."""
    answer = 42
    return answer
| 11.666667
| 20
| 0.685714
| 5
| 35
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0.228571
| 35
| 2
| 21
| 17.5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e47da2d93ac65b12a253a6a652446fefced808ed
| 57
|
py
|
Python
|
app/stocks/config.py
|
Monxun/DjangoMLDocker
|
f34ddbc2f504054ed32ed1fb66a0a77c461350dd
|
[
"MIT"
] | null | null | null |
app/stocks/config.py
|
Monxun/DjangoMLDocker
|
f34ddbc2f504054ed32ed1fb66a0a77c461350dd
|
[
"MIT"
] | null | null | null |
app/stocks/config.py
|
Monxun/DjangoMLDocker
|
f34ddbc2f504054ed32ed1fb66a0a77c461350dd
|
[
"MIT"
] | null | null | null |
import os

# Tiingo API token used by the stocks app.
# NOTE(security): this token was previously hard-coded in source control.
# Prefer supplying it via the TIINGO_TOKEN environment variable; the original
# literal is retained only as a backward-compatible fallback so existing
# deployments keep working. Rotate the exposed token when possible.
tiingo_token = os.environ.get('TIINGO_TOKEN', '7805c033e3cd1d7e1b3aa071522e811893a28676')
| 57
| 57
| 0.912281
| 3
| 57
| 17
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.527273
| 0.035088
| 57
| 1
| 57
| 57
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0.689655
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e4a56c8f65d045b5b2f1c7c30f44299ba89dcbd1
| 25,123
|
py
|
Python
|
etl/sql_scripts.py
|
ClinicalTrialsTeam/CTFrontier
|
d7e2558f314f6bbd9964667e12ee5655bc64215b
|
[
"Apache-2.0"
] | 4
|
2021-03-07T02:16:22.000Z
|
2022-03-13T03:22:42.000Z
|
etl/sql_scripts.py
|
ClinicalTrialsTeam/CTFrontier
|
d7e2558f314f6bbd9964667e12ee5655bc64215b
|
[
"Apache-2.0"
] | 8
|
2021-03-14T22:14:07.000Z
|
2021-04-26T17:20:56.000Z
|
etl/sql_scripts.py
|
ClinicalTrialsTeam/CTFrontier
|
d7e2558f314f6bbd9964667e12ee5655bc64215b
|
[
"Apache-2.0"
] | 1
|
2021-03-07T02:16:39.000Z
|
2021-03-07T02:16:39.000Z
|
class SqlScripts:
last_run_date = """
SELECT last_run_date
FROM ctgov.etl
"""
update_count_from_ctti = """
SELECT COUNT(*)
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s |
UPDATE ctgov.etl
SET update_max_rows = %s
"""
insert_count_from_ctti = """
SELECT COUNT(*)
FROM ctgov.studies
WHERE study_first_submitted_date >= %s |
UPDATE ctgov.etl
SET insert_max_rows = %s
"""
get_etl_metadata = """
SELECT last_run_date,
insert_current_offset,
insert_max_rows,
update_current_offset,
update_max_rows
FROM ctgov.etl
"""
insert_studies = """
SELECT nct_id, nlm_download_date_description,
study_first_submitted_date, results_first_submitted_date,
disposition_first_submitted_date, last_update_submitted_date,
study_first_submitted_qc_date, study_first_posted_date,
study_first_posted_date_type, results_first_submitted_qc_date,
results_first_posted_date, results_first_posted_date_type,
disposition_first_submitted_qc_date, disposition_first_posted_date,
disposition_first_posted_date_type, last_update_submitted_qc_date,
last_update_posted_date, last_update_posted_date_type,
start_month_year, start_date_type, start_date,
verification_month_year, verification_date,
completion_month_year, completion_date_type, completion_date,
primary_completion_month_year, primary_completion_date_type,
primary_completion_date, target_duration, study_type, acronym,
baseline_population, brief_title, official_title, overall_status,
last_known_status, phase, enrollment, enrollment_type, source,
limitations_and_caveats, number_of_arms, number_of_groups,
why_stopped, has_expanded_access, expanded_access_type_individual,
expanded_access_type_intermediate, expanded_access_type_treatment,
has_dmc, is_fda_regulated_drug, is_fda_regulated_device,
is_unapproved_device, is_ppsd, is_us_export, biospec_retention,
biospec_description, ipd_time_frame, ipd_access_criteria, ipd_url,
plan_to_share_ipd, plan_to_share_ipd_description, created_at,
updated_at
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s |
INSERT INTO ctgov.studies(nct_id, nlm_download_date_description,
study_first_submitted_date,
results_first_submitted_date,
disposition_first_submitted_date,
last_update_submitted_date,
study_first_submitted_qc_date,
study_first_posted_date,
study_first_posted_date_type,
results_first_submitted_qc_date,
results_first_posted_date,
results_first_posted_date_type,
disposition_first_submitted_qc_date,
disposition_first_posted_date,
disposition_first_posted_date_type,
last_update_submitted_qc_date,
last_update_posted_date,
last_update_posted_date_type, start_month_year,
start_date_type, start_date,
verification_month_year, verification_date,
completion_month_year, completion_date_type,
completion_date, primary_completion_month_year,
primary_completion_date_type,
primary_completion_date, target_duration,
study_type, acronym, baseline_population,
brief_title, official_title, overall_status,
last_known_status, phase, enrollment,
enrollment_type, source, limitations_and_caveats,
number_of_arms, number_of_groups, why_stopped,
has_expanded_access,
expanded_access_type_individual,
expanded_access_type_intermediate,
expanded_access_type_treatment, has_dmc,
is_fda_regulated_drug, is_fda_regulated_device,
is_unapproved_device, is_ppsd, is_us_export,
biospec_retention, biospec_description,
ipd_time_frame, ipd_access_criteria, ipd_url,
plan_to_share_ipd, plan_to_share_ipd_description,
created_at, updated_at)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
update_studies = """
SELECT nlm_download_date_description, study_first_submitted_date,
results_first_submitted_date, disposition_first_submitted_date,
last_update_submitted_date, study_first_submitted_qc_date,
study_first_posted_date, study_first_posted_date_type,
results_first_submitted_qc_date, results_first_posted_date,
results_first_posted_date_type, disposition_first_submitted_qc_date,
disposition_first_posted_date, disposition_first_posted_date_type,
last_update_submitted_qc_date, last_update_posted_date,
last_update_posted_date_type, start_month_year, start_date_type,
start_date, verification_month_year, verification_date,
completion_month_year, completion_date_type, completion_date,
primary_completion_month_year, primary_completion_date_type,
primary_completion_date, target_duration, study_type, acronym,
baseline_population, brief_title, official_title, overall_status,
last_known_status, phase, enrollment, enrollment_type, source,
limitations_and_caveats, number_of_arms, number_of_groups,
why_stopped, has_expanded_access, expanded_access_type_individual,
expanded_access_type_intermediate, expanded_access_type_treatment,
has_dmc, is_fda_regulated_drug, is_fda_regulated_device,
is_unapproved_device, is_ppsd, is_us_export, biospec_retention,
biospec_description, ipd_time_frame, ipd_access_criteria, ipd_url,
plan_to_share_ipd, plan_to_share_ipd_description, created_at,
updated_at, nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s |
UPDATE ctgov.studies
SET nlm_download_date_description=%s,
study_first_submitted_date=%s,
results_first_submitted_date=%s,
disposition_first_submitted_date=%s,
last_update_submitted_date=%s,
study_first_submitted_qc_date=%s,
study_first_posted_date=%s,
study_first_posted_date_type=%s,
results_first_submitted_qc_date=%s,
results_first_posted_date=%s,
results_first_posted_date_type=%s,
disposition_first_submitted_qc_date=%s,
disposition_first_posted_date=%s,
disposition_first_posted_date_type=%s,
last_update_submitted_qc_date=%s,
last_update_posted_date=%s,
last_update_posted_date_type=%s,
start_month_year=%s,
start_date_type=%s,
start_date=%s,
verification_month_year=%s,
verification_date=%s,
completion_month_year=%s,
completion_date_type=%s,
completion_date=%s,
primary_completion_month_year=%s,
primary_completion_date_type=%s,
primary_completion_date=%s,
target_duration=%s,
study_type=%s,
acronym=%s,
baseline_population=%s,
brief_title=%s,
official_title=%s,
overall_status=%s,
last_known_status=%s,
phase=%s,
enrollment=%s,
enrollment_type=%s,
source=%s,
limitations_and_caveats=%s,
number_of_arms=%s,
number_of_groups=%s,
why_stopped=%s,
has_expanded_access=%s,
expanded_access_type_individual=%s,
expanded_access_type_intermediate=%s,
expanded_access_type_treatment=%s,
has_dmc=%s,
is_fda_regulated_drug=%s,
is_fda_regulated_device=%s,
is_unapproved_device=%s,
is_ppsd=%s,
is_us_export=%s,
biospec_retention=%s,
biospec_description=%s,
ipd_time_frame=%s,
ipd_access_criteria=%s,
ipd_url=%s,
plan_to_share_ipd=%s,
plan_to_share_ipd_description=%s,
created_at=%s,
updated_at=%s
WHERE nct_id = %s;
"""
insert_conditions = """
SELECT id, nct_id, name, downcase_name
FROM ctgov.conditions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.conditions(id, nct_id, name, downcase_name)
VALUES (%s, %s, %s, %s);
"""
update_conditions = """
SELECT name, downcase_name, id
FROM ctgov.conditions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.conditions
SET name=%s,
downcase_name=%s
WHERE id = %s
"""
insert_detailed_descriptions = """
SELECT id, nct_id, description
FROM ctgov.detailed_descriptions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.detailed_descriptions(id, nct_id, description)
VALUES (%s, %s, %s)
"""
update_detailed_descriptions = """
SELECT description, id
FROM ctgov.detailed_descriptions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.detailed_descriptions
SET description= %s
WHERE id = %s
"""
insert_countries = """
SELECT id, nct_id, name, removed
FROM ctgov.countries
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.countries(id, nct_id, name, removed)
VALUES (%s, %s, %s, %s);
"""
update_countries = """
SELECT name, removed, id
FROM ctgov.countries
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.countries
SET name= %s, removed= %s
WHERE id = %s;
"""
insert_interventions = """
SELECT id, nct_id, intervention_type, name, description
FROM ctgov.interventions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.interventions
(id, nct_id, intervention_type, name, description)
VALUES (%s, %s, %s, %s, %s);
"""
update_interventions = """
SELECT intervention_type, name, description, id
FROM ctgov.interventions
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.interventions
SET intervention_type=%s, name=%s, description=%s
WHERE id = %s;
"""
insert_keywords = """
SELECT id, nct_id, name, downcase_name
FROM ctgov.keywords
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.keywords(id, nct_id, name, downcase_name)
VALUES (%s, %s, %s, %s);
"""
update_keywords = """
SELECT name, downcase_name, id
FROM ctgov.keywords
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.keywords
SET name=%s, downcase_name=%s
WHERE id = %s;
"""
insert_eligibilities = """
SELECT id, nct_id, sampling_method, gender, minimum_age, maximum_age,
healthy_volunteers, population, criteria, gender_description,
gender_based
FROM ctgov.eligibilities
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.eligibilities(id, nct_id, sampling_method, gender,
minimum_age, maximum_age,
healthy_volunteers, population, criteria,
gender_description, gender_based)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
update_eligibilities = """
SELECT sampling_method, gender, minimum_age, maximum_age,
healthy_volunteers, population, criteria, gender_description,
gender_based, id
FROM ctgov.eligibilities
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.eligibilities
SET sampling_method=%s, gender=%s, minimum_age=%s, maximum_age=%s,
healthy_volunteers=%s, population=%s, criteria=%s,
gender_description=%s, gender_based=%s
WHERE id = %s;
"""
insert_facilities = """
SELECT id, nct_id, status, name, city, state, zip, country
FROM ctgov.facilities
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.facilities(
id, nct_id, status, name, city, state, zip, country)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
"""
update_facilities = """
SELECT status, name, city, state, zip, country, id
FROM ctgov.facilities
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.facilities
SET status=%s, name=%s, city=%s, state=%s, zip=%s, country=%s
WHERE id = %s;
"""
insert_brief_summaries = """
SELECT id, nct_id, description
FROM ctgov.brief_summaries
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.brief_summaries(id, nct_id, description)
VALUES (%s, %s, %s);
"""
update_brief_summaries = """
SELECT description, id
FROM ctgov.brief_summaries
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.brief_summaries
SET description=%s
WHERE id = %s;
"""
insert_sponsors = """
SELECT id, nct_id, agency_class, lead_or_collaborator, name
FROM ctgov.sponsors
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.sponsors(
id, nct_id, agency_class, lead_or_collaborator, name)
VALUES (%s, %s, %s, %s, %s);
"""
update_sponsors = """
SELECT agency_class, lead_or_collaborator, name, id
FROM ctgov.sponsors
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.sponsors
SET agency_class=%s, lead_or_collaborator=%s, name=%s
WHERE id = %s;
"""
# Handle Deletes
insert_outcomes = """
SELECT id, nct_id, outcome_type, title, description, time_frame,
population, anticipated_posting_date, anticipated_posting_month_year,
units, units_analyzed, dispersion_type, param_type
FROM ctgov.outcomes
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.outcomes(
id, nct_id, outcome_type, title, description, time_frame, population,
anticipated_posting_date, anticipated_posting_month_year, units,
units_analyzed, dispersion_type, param_type)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
update_outcomes = """
SELECT outcome_type, title, description, time_frame, population,
anticipated_posting_date, anticipated_posting_month_year, units,
units_analyzed, dispersion_type, param_type, id
FROM ctgov.outcomes
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.outcomes
SET outcome_type=%s, title=%s, description=%s, time_frame=%s,
population=%s, anticipated_posting_date=%s,
anticipated_posting_month_year=%s, units=%s, units_analyzed=%s,
dispersion_type=%s, param_type=%s
WHERE id = %s;
"""
insert_results_groups = """
SELECT id, nct_id, ctgov_group_code, result_type, title, description
FROM ctgov.result_groups
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.result_groups(
id, nct_id, ctgov_group_code, result_type, title, description)
VALUES (%s, %s, %s, %s, %s, %s);
"""
update_results_groups = """
SELECT ctgov_group_code, result_type, title, description, id
FROM ctgov.result_groups
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.result_groups
SET ctgov_group_code=%s, result_type=%s, title=%s, description=%s
WHERE id = %s;
"""
insert_outcome_measures = """
SELECT id, nct_id, outcome_id, result_group_id, ctgov_group_code,
classification, category, title, description, units, param_type,
param_value, param_value_num, dispersion_type, dispersion_value,
dispersion_value_num, dispersion_lower_limit,
dispersion_upper_limit, explanation_of_na
FROM ctgov.outcome_measurements
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.outcome_measurements(
id, nct_id, outcome_id, result_group_id, ctgov_group_code, classification,
category, title, description, units, param_type, param_value,
param_value_num, dispersion_type, dispersion_value, dispersion_value_num,
dispersion_lower_limit, dispersion_upper_limit, explanation_of_na)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s);
"""
update_outcome_measures = """
SELECT outcome_id, result_group_id, ctgov_group_code,
classification, category, title, description, units, param_type,
param_value, param_value_num, dispersion_type, dispersion_value,
dispersion_value_num, dispersion_lower_limit,
dispersion_upper_limit, explanation_of_na, id
FROM ctgov.outcome_measurements
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.outcome_measurements
SET outcome_id=%s, result_group_id=%s, ctgov_group_code=%s,
classification=%s, category=%s, title=%s, description=%s, units=%s,
param_type=%s, param_value=%s, param_value_num=%s, dispersion_type=%s,
dispersion_value=%s, dispersion_value_num=%s,
dispersion_lower_limit=%s, dispersion_upper_limit=%s,
explanation_of_na=%s
WHERE id = %s;
"""
insert_id_information = """
SELECT id, nct_id, id_type, id_value
FROM ctgov.id_information
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.id_information(id, nct_id, id_type, id_value)
VALUES (%s, %s, %s, %s);
"""
update_id_information = """
SELECT id_type, id_value, id
FROM ctgov.id_information
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.id_information
SET id_type=%s, id_value=%s
WHERE id = %s;
"""
insert_documents = """
SELECT id, nct_id, document_id, document_type, url, comment
FROM ctgov.documents
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE study_first_submitted_date >= %s
ORDER BY created_at LIMIT %s OFFSET %s) |
INSERT INTO ctgov.documents(
id, nct_id, document_id, document_type, url, comment)
VALUES (%s, %s, %s, %s, %s, %s);
"""
update_documents = """
SELECT document_id, document_type, url, comment, id
FROM ctgov.documents
WHERE nct_id IN (SELECT nct_id
FROM ctgov.studies
WHERE last_update_submitted_date >= %s
AND study_first_submitted_date < %s
ORDER BY created_at LIMIT %s OFFSET %s) |
UPDATE ctgov.documents
SET document_id=%s, document_type=%s, url=%s, comment=%s
WHERE id = %s;
"""
update_last_run_date = """
UPDATE ctgov.etl
SET last_run_date = %s
"""
rebuild_search_studies = """
REFRESH MATERIALIZED VIEW ctgov.search_studies
"""
rebuild_all_sponsors_type = """
REFRESH MATERIALIZED VIEW ctgov.all_sponsors_type
"""
rebuild_all_documents = """
REFRESH MATERIALIZED VIEW ctgov.all_documents
"""
| 41.801997
| 80
| 0.599968
| 2,956
| 25,123
| 4.728349
| 0.05548
| 0.020748
| 0.027903
| 0.032911
| 0.805108
| 0.771052
| 0.745367
| 0.716892
| 0.714316
| 0.678472
| 0
| 0
| 0.325479
| 25,123
| 600
| 81
| 41.871667
| 0.824796
| 0.000557
| 0
| 0.504488
| 0
| 0.016158
| 0.946423
| 0.248745
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.070018
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e4f37bf7eeedb679f1866cb93d39b9dc1c1aaa19
| 639
|
py
|
Python
|
tools/accuracy_checker/__init__.py
|
shinh/dldt
|
693ab4e79a428e0801f17f4511b129a3fa8f4a62
|
[
"Apache-2.0"
] | 1
|
2021-02-20T21:48:36.000Z
|
2021-02-20T21:48:36.000Z
|
tools/accuracy_checker/__init__.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/__init__.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | 1
|
2021-02-19T01:06:12.000Z
|
2021-02-19T01:06:12.000Z
|
from .accuracy_checker import (
annotation_converters,
adapters,
config,
data_readers,
launcher,
metrics,
postprocessor,
preprocessor,
representation,
dataset,
dependency,
logging,
main,
model_evaluator,
presenters,
progress_reporters,
utils
)
__all__ = [
'annotation_converters',
'adapters',
'config',
'data_readers',
'launcher',
'metrics',
'postprocessor',
'preprocessor',
'representation',
'dataset',
'dependency',
'logging',
'main',
'model_evaluator',
'presenters',
'progress_reporters',
'utils'
]
| 15.975
| 31
| 0.605634
| 47
| 639
| 7.957447
| 0.553191
| 0.106952
| 0.149733
| 0.181818
| 0.925134
| 0.925134
| 0.925134
| 0.925134
| 0.925134
| 0.925134
| 0
| 0
| 0.28169
| 639
| 39
| 32
| 16.384615
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.276995
| 0.032864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026316
| 0
| 0.026316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
902948b991c801ef769b540922a6d6536cafe4fc
| 1,401
|
py
|
Python
|
test/TestDasTime.py
|
Septaris/das2py
|
ee657afe706f4aec6e016b17b63a093ed271f7f6
|
[
"MIT"
] | null | null | null |
test/TestDasTime.py
|
Septaris/das2py
|
ee657afe706f4aec6e016b17b63a093ed271f7f6
|
[
"MIT"
] | null | null | null |
test/TestDasTime.py
|
Septaris/das2py
|
ee657afe706f4aec6e016b17b63a093ed271f7f6
|
[
"MIT"
] | null | null | null |
import das2
import unittest
class TestDasTime(unittest.TestCase):
def test_floor(self):
dt1 = das2.DasTime('2014-01-01T12:57:34.445')
dt2 = das2.DasTime('2014-01-01T12:57')
dt1.floor(60)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:57:34.445')
dt2 = das2.DasTime('2014-01-01T12:57:30')
dt1.floor(25)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:57:34.445')
dt2 = das2.DasTime('2014-01-01T12:50')
dt1.floor(600)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:57:34.445')
dt2 = das2.DasTime('2014-01-01T12:40')
dt1.floor(1200)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:57:34.445')
dt2 = das2.DasTime('2014-01-01T12:00')
dt1.floor(3600)
self.assertEqual(dt1, dt2)
def test_ceil(self):
dt1 = das2.DasTime('2014-01-01T12:07:34.445')
dt2 = das2.DasTime('2014-01-01T12:08')
dt1.ceil(60)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:07:34.445')
dt2 = das2.DasTime('2014-01-01T12:10')
dt1.ceil(600)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:07:34.445')
dt2 = das2.DasTime('2014-01-01T12:20')
dt1.ceil(1200)
self.assertEqual(dt1, dt2)
dt1 = das2.DasTime('2014-01-01T12:07:34.445')
dt2 = das2.DasTime('2014-01-01T13:00')
dt1.ceil(3600)
self.assertEqual(dt1, dt2)
if __name__ == '__main__':
unittest.main()
| 24.155172
| 47
| 0.668808
| 229
| 1,401
| 4.048035
| 0.165939
| 0.213592
| 0.291262
| 0.330097
| 0.79288
| 0.738943
| 0.738943
| 0.730313
| 0.730313
| 0.730313
| 0
| 0.29608
| 0.144183
| 1,401
| 57
| 48
| 24.578947
| 0.477064
| 0
| 0
| 0.418605
| 0
| 0
| 0.258387
| 0.147752
| 0
| 0
| 0
| 0
| 0.209302
| 1
| 0.046512
| false
| 0
| 0.046512
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
903e3441c05cc7bda0f6a746497f38127133ab56
| 165
|
py
|
Python
|
config/__init__.py
|
AbhishekRana23/TeleTor
|
864d528b3afbb12ac03d371e866475801ee5e0ab
|
[
"Apache-2.0"
] | 69
|
2018-11-30T11:22:50.000Z
|
2021-12-05T20:30:53.000Z
|
config/__init__.py
|
AbhishekRana23/TeleTor
|
864d528b3afbb12ac03d371e866475801ee5e0ab
|
[
"Apache-2.0"
] | 2
|
2019-02-24T09:33:25.000Z
|
2020-03-23T12:52:17.000Z
|
config/__init__.py
|
AbhishekRana23/TeleTor
|
864d528b3afbb12ac03d371e866475801ee5e0ab
|
[
"Apache-2.0"
] | 28
|
2019-03-28T16:32:00.000Z
|
2022-02-07T13:14:24.000Z
|
from .settings import telegram_bot, LOG_LEVEL, auth_cfg, torrents_cfg, favourites
__all__ = ['telegram_bot', 'LOG_LEVEL', 'auth_cfg', 'torrents_cfg', 'favourites']
| 41.25
| 81
| 0.769697
| 22
| 165
| 5.227273
| 0.545455
| 0.191304
| 0.243478
| 0.330435
| 0.817391
| 0.817391
| 0.817391
| 0.817391
| 0.817391
| 0
| 0
| 0
| 0.09697
| 165
| 3
| 82
| 55
| 0.771812
| 0
| 0
| 0
| 0
| 0
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
5f3faf1058464a7354fde6f57ee8e62eb344bd30
| 10,161
|
py
|
Python
|
app/tests/routers/test_stars.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 1
|
2022-03-21T07:37:02.000Z
|
2022-03-21T07:37:02.000Z
|
app/tests/routers/test_stars.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 25
|
2022-01-16T13:18:21.000Z
|
2022-03-29T13:08:19.000Z
|
app/tests/routers/test_stars.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 1
|
2022-01-15T21:42:00.000Z
|
2022-01-15T21:42:00.000Z
|
from typing import Callable
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from pymongo.database import Database
from app.models.channel import Channel
from app.models.message import Message
from app.models.server import Server
from app.models.user import User
class TestStarsRoutes:
@pytest.mark.asyncio
async def test_create_star_channel_message(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
json_response = response.json()
assert json_response["type"] == "message"
assert json_response["message"] == str(channel_message.id)
@pytest.mark.asyncio
async def test_create_star_dm(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, direct_message: Message
):
data = {"message": str(direct_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
json_response = response.json()
assert json_response["type"] == "message"
assert json_response["message"] == str(direct_message.id)
@pytest.mark.asyncio
async def test_create_star_server_channel(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server_channel: Channel
):
data = {"channel": str(server_channel.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
json_response = response.json()
assert json_response["type"] == "channel"
assert json_response["channel"] == str(server_channel.id)
@pytest.mark.asyncio
async def test_create_star_dm_channel(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, dm_channel: Channel
):
data = {"channel": str(dm_channel.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
json_response = response.json()
assert json_response["type"] == "channel"
assert json_response["channel"] == str(dm_channel.id)
@pytest.mark.asyncio
async def test_create_star_server(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server: Server
):
data = {"server": str(server.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
json_response = response.json()
assert json_response["type"] == "server"
assert json_response["server"] == str(server.id)
@pytest.mark.asyncio
async def test_create_star_random(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient
):
data = {"random": "whatevz"}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 400
@pytest.mark.asyncio
async def test_create_star_multiple_times(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 400
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
@pytest.mark.asyncio
async def test_create_same_star_multiple_users(
self,
app: FastAPI,
db: Database,
current_user: User,
authorized_client: AsyncClient,
guest_user: User,
get_authorized_client: Callable,
channel_message: Message,
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
guest_client = await get_authorized_client(guest_user)
response = await guest_client.post("/stars", json=data)
assert response.status_code == 201
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
guest_stars = (await guest_client.get("/stars")).json()
assert len(guest_stars) == 1
@pytest.mark.asyncio
async def test_list_stars(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 0
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
assert stars[0]["type"] == "message"
assert stars[0]["message"] == str(channel_message.id)
@pytest.mark.asyncio
async def test_delete_star(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
star_id = response.json()["id"]
response = await authorized_client.delete(f"/stars/{star_id}")
assert response.status_code == 204
@pytest.mark.asyncio
async def test_delete_star_multiple_times(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
star_id = response.json()["id"]
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
response = await authorized_client.delete(f"/stars/{star_id}")
assert response.status_code == 204
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 0
response = await authorized_client.delete(f"/stars/{star_id}")
assert response.status_code == 204
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 0
response = await authorized_client.delete(f"/stars/{star_id}")
assert response.status_code == 204
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 0
@pytest.mark.asyncio
async def test_delete_other_user_star(
self,
app: FastAPI,
db: Database,
current_user: User,
authorized_client: AsyncClient,
guest_user: User,
get_authorized_client: Callable,
channel_message: Message,
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
star_id = response.json()["id"]
stars = (await authorized_client.get("/stars")).json()
assert len(stars) == 1
guest_client = await get_authorized_client(guest_user)
response = await guest_client.delete(f"/stars/{star_id}")
assert response.status_code == 403
@pytest.mark.asyncio
async def test_get_message_stars(
self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, channel_message: Message
):
data = {"message": str(channel_message.id)}
response = await authorized_client.post("/stars", json=data)
assert response.status_code == 201
assert (await authorized_client.get("/stars?type=channel")).json() == []
assert (await authorized_client.get("/stars?type=server")).json() == []
response = await authorized_client.get("/stars?type=message")
assert response.status_code == 200
stars = response.json()
assert len(stars) == 1
assert stars[0]["type"] == "message"
assert stars[0]["message"] == data["message"]
@pytest.mark.asyncio
async def test_get_channel_stars(
    self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server_channel: Channel
):
    """Filtering the star list by type=channel returns only channel stars."""
    payload = {"channel": str(server_channel.id)}
    create_resp = await authorized_client.post("/stars", json=payload)
    assert create_resp.status_code == 201

    # The other star-type filters must come back empty.
    for other_type in ("message", "server"):
        assert (await authorized_client.get(f"/stars?type={other_type}")).json() == []

    list_resp = await authorized_client.get("/stars?type=channel")
    assert list_resp.status_code == 200
    listed = list_resp.json()
    assert len(listed) == 1
    star = listed[0]
    assert star["type"] == "channel"
    assert star["channel"] == payload["channel"]
@pytest.mark.asyncio
async def test_get_server_stars(
    self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, server: Server
):
    """Filtering the star list by type=server returns only server stars."""
    payload = {"server": str(server.id)}
    create_resp = await authorized_client.post("/stars", json=payload)
    assert create_resp.status_code == 201

    # The other star-type filters must come back empty.
    for other_type in ("channel", "message"):
        assert (await authorized_client.get(f"/stars?type={other_type}")).json() == []

    list_resp = await authorized_client.get("/stars?type=server")
    assert list_resp.status_code == 200
    listed = list_resp.json()
    assert len(listed) == 1
    star = listed[0]
    assert star["type"] == "server"
    assert star["server"] == payload["server"]
| 38.782443
| 118
| 0.651708
| 1,190
| 10,161
| 5.382353
| 0.053782
| 0.147385
| 0.131148
| 0.093677
| 0.920999
| 0.908821
| 0.904606
| 0.880094
| 0.845746
| 0.836534
| 0
| 0.01247
| 0.226553
| 10,161
| 261
| 119
| 38.931034
| 0.802519
| 0
| 0
| 0.760563
| 0
| 0
| 0.070367
| 0
| 0
| 0
| 0
| 0
| 0.300469
| 1
| 0
| false
| 0
| 0.042254
| 0
| 0.046948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5f64bdeca4e52eeec1da5baf3b66171ccecb9b9f
| 6,617
|
py
|
Python
|
atv/mailer.py
|
sebastianlees/answer.tv
|
f9827c1168f5488e043ab99dc1e4439373d99f31
|
[
"MIT"
] | null | null | null |
atv/mailer.py
|
sebastianlees/answer.tv
|
f9827c1168f5488e043ab99dc1e4439373d99f31
|
[
"MIT"
] | 61
|
2018-07-09T17:33:15.000Z
|
2020-01-06T18:46:53.000Z
|
atv/mailer.py
|
sebs-code/answer-tv
|
f9827c1168f5488e043ab99dc1e4439373d99f31
|
[
"MIT"
] | 2
|
2018-08-06T03:39:46.000Z
|
2019-10-02T17:48:34.000Z
|
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

# Email server settings.
# SECURITY: the original file committed live SMTP credentials to source
# control. They are kept only as backward-compatible fallbacks; the values
# should be supplied via the environment and the leaked credentials rotated.
smtp_server = os.environ.get('SMTP_SERVER', 'email-smtp.eu-west-1.amazonaws.com')
smtp_username = os.environ.get('SMTP_USERNAME', 'AKIAID2MYX7XYWL7TKSA')
smtp_password = os.environ.get('SMTP_PASSWORD', 'Alv3cd3HeejmJqItsKe9JGPx150YJswnmz9xjc42Wz/U')
smtp_port = os.environ.get('SMTP_PORT', '25')
smtp_do_tls = True
# /Contact contact form mailer
def contactForm(sender, email, message):
    """Send a contact-form submission to the site owner via SMTP.

    Args:
        sender: Address used as the SMTP envelope sender.
        email: Submitter's address (currently unused; kept for interface
            compatibility with existing callers).
        message: Body text of the contact message.
    """
    toaddrs = 'sebastian@incerto.net'
    msg = "From: team@answer.tv " + message
    # BUG FIX: the original referenced an undefined `server` (NameError on
    # every call) and invoked the nonexistent `tls()` method. Create the
    # connection and use starttls(), matching the other mailer functions.
    server = smtplib.SMTP(host=smtp_server, port=smtp_port, timeout=10)
    server.set_debuglevel(10)
    server.starttls()
    server.ehlo()
    server.login(smtp_username, smtp_password)
    server.sendmail(sender, toaddrs, msg)
    server.quit()
# Signup email
def signUp(email, verify):
    """Send the account-activation email for a new registration.

    Args:
        email: Recipient address of the new user.
        verify: Verification token appended to the activation URL.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Answer.tv registration"
    msg['From'] = 'registration@answer.tv'  # Your from name and email address
    msg['To'] = email
    text = '''Thank you for signing up to answer.tv! To complete your
registration, simply click on the link below to activate your
account. If you cannot click on the link please copy and paste into
your web browser. Please note that this link will expire within 24
hours.\n http://www.answer.tv/verify/''' + verify + '''\nIf you
believe you have received this email in error, please contact us at
answer.tv/contact.\n Regards \n The answer.tv team\n'''
    part1 = MIMEText(text, 'plain')
    # BUG FIX: the HTML contact link pointed at answer.tv/conatact (typo),
    # a dead link; it now matches the plain-text answer.tv/contact address.
    html = '''<p>Thank you for signing up to answer.tv! To complete your
registration, simply click on the link below to activate your
account. If you cannot click on the link please copy and paste into
your web browser. Please note that this link will expire within 24
hours.</p>''' + "<p><a href='http://www.answer.tv/verify/" + verify \
 + "'>http://www.answer.tv/verify/" + verify + "</a></p>" + """<p>If
you believe that you have received this email in error, please
<a href="http://answer.tv/contact">contact us</a>.</p><p>Regards</p><p>The answer.tv
team</p>"""
    part2 = MIMEText(html, 'html')
    # Removed dead locals `username`/`password` (placeholder values that were
    # never used; real credentials come from the module-level settings).
    msg.attach(part1)
    msg.attach(part2)
    # Change according to your settings
    server = smtplib.SMTP(host=smtp_server, port=smtp_port, timeout=10)
    server.set_debuglevel(10)
    server.starttls()
    server.ehlo()
    server.login(smtp_username, smtp_password)
    server.sendmail(msg['From'], msg['To'], msg.as_string())
    server.quit()
# Reset password email
def resetPassword(email, resetcode):
    """Send the password-reset email containing a one-time reset link.

    Args:
        email: Recipient address.
        resetcode: Reset token appended to the reset URL.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Answer.tv reset password request"
    msg['From'] = 'registration@answer.tv'  # Your from name and email address
    msg['To'] = email
    # BUG FIX: the plain-text reset URL was split across a source newline
    # ("http://www.answer.tv/\nreset/..."), producing a broken link in the
    # delivered email; the URL is now emitted on a single line.
    text = '''You have requested to reset your answer.tv password. Click on the
link below to reset your password. If you cannot click on the link,
please copy and paste the address into your web browser. Please note
that this link will expire within 24 hours.\n http://www.answer.tv/reset/''' + resetcode + """\nIf you believe you have received this
email in error, or you have not requested a password reset, please
contact us at answer.tv/contact.\n Regards \n The answer.tv team\n"""
    part1 = MIMEText(text, 'plain')
    # BUG FIX: the HTML contact link pointed at answer.tv/conatact (typo),
    # a dead link; it now matches the plain-text answer.tv/contact address.
    html = '''<p>You have requested to reset your answer.tv password. Click on
the link below to reset your password. If you cannot click on the
link, please copy and paste the address into your web browser.
Please note that this link will expire within 24 hours.</p>''' + """
<p><a href='http://www.answer.tv/reset/""" + resetcode + """'>
http://www.answer.tv/reset/""" + resetcode + "</a></p>" + """<p>If
you believe that you have received this email in error, or you have
not requested a password reset, please <a href="http://answer.tv/contact">contact us</a>.
</p><p>Regards</p><p>The answer.tv team</p>"""
    part2 = MIMEText(html, 'html')
    # Removed dead locals `username`/`password` (placeholder values that were
    # never used; real credentials come from the module-level settings).
    msg.attach(part1)
    msg.attach(part2)
    # Change according to your settings
    server = smtplib.SMTP(host=smtp_server, port=smtp_port, timeout=10)
    server.set_debuglevel(10)
    server.starttls()
    server.ehlo()
    server.login(smtp_username, smtp_password)
    server.sendmail(msg['From'], msg['To'], msg.as_string())
    server.quit()
# Reset email address email
def resetEmail(email, resetcode):
    """Send the email-change confirmation message with a one-time link.

    Args:
        email: Recipient address (the account's current email).
        resetcode: Reset token appended to the email-reset URL.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Answer.tv email change request"
    msg['From'] = 'registration@answer.tv'  # Your from name and email address
    msg['To'] = email
    text = '''You have requested to change your answer.tv email address. Click
on the link below to reset your email address. If you cannot click on
the link, please copy and paste the address into your web browser.
Please note that this link will expire within 24 hours.\n
http://www.answer.tv/ereset/''' + resetcode + """\nIf you believe
you have received this email in error, or you have not requested an
email change please contact us at answer.tv/contact.\n Regards \n
The answer.tv team\n"""
    part1 = MIMEText(text, 'plain')
    # BUG FIX: the HTML contact link pointed at answer.tv/conatact (typo),
    # a dead link; it now matches the plain-text answer.tv/contact address.
    html = '''<p>You have requested to change your answer.tv email address.
Click on the link below to reset your email address. If you cannot
click on the link, please copy and paste the address into your web
browser. Please note that this link will expire within 24 hours.</p>
''' + "<p><a href='http://www.answer.tv/ereset/" + resetcode + """'>
http://www.answer.tv/ereset/""" + resetcode + "</a></p>" + """<p>If
you believe that you have received this email in error, or you have
not requested a email change, <a href="http://answer.tv/contact">contact us</a>.
</p><p>Regards</p><p>The answer.tv team</p>"""
    part2 = MIMEText(html, 'html')
    # Removed dead locals `username`/`password` (placeholder values that were
    # never used; real credentials come from the module-level settings).
    msg.attach(part1)
    msg.attach(part2)
    # Change according to your settings
    server = smtplib.SMTP(host=smtp_server, port=smtp_port, timeout=10)
    server.set_debuglevel(10)
    server.starttls()
    server.ehlo()
    server.login(smtp_username, smtp_password)
    server.sendmail(msg['From'], msg['To'], msg.as_string())
    server.quit()
| 40.845679
| 101
| 0.644854
| 916
| 6,617
| 4.628821
| 0.14083
| 0.064151
| 0.028302
| 0.039623
| 0.871934
| 0.866981
| 0.827594
| 0.816981
| 0.805425
| 0.777594
| 0
| 0.010518
| 0.238477
| 6,617
| 161
| 102
| 41.099379
| 0.830919
| 0.050929
| 0
| 0.508333
| 0
| 0.025
| 0.609764
| 0.038928
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0.133333
| 0.033333
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
5f6808d3f91fade9b4e9b840644da7df5b6bc168
| 59,188
|
py
|
Python
|
src/steputils/express/expressListener.py
|
w0rm/steputils
|
f49c0b282dad8087fd4d5ac153412c6a5bdfef29
|
[
"MIT"
] | 16
|
2020-06-30T09:28:46.000Z
|
2022-02-18T02:55:44.000Z
|
src/steputils/express/expressListener.py
|
w0rm/steputils
|
f49c0b282dad8087fd4d5ac153412c6a5bdfef29
|
[
"MIT"
] | 3
|
2020-07-14T15:49:14.000Z
|
2022-03-29T03:20:33.000Z
|
src/steputils/express/expressListener.py
|
w0rm/steputils
|
f49c0b282dad8087fd4d5ac153412c6a5bdfef29
|
[
"MIT"
] | 5
|
2020-07-21T16:21:06.000Z
|
2022-03-28T12:03:30.000Z
|
# Generated from D:/Source/steputils/data/formal\express.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .expressParser import expressParser
else:
from expressParser import expressParser
# This class defines a complete listener for a parse tree produced by expressParser.
class expressListener(ParseTreeListener):
# Enter a parse tree produced by expressParser#attribute_ref.
def enterAttribute_ref(self, ctx:expressParser.Attribute_refContext):
pass
# Exit a parse tree produced by expressParser#attribute_ref.
def exitAttribute_ref(self, ctx:expressParser.Attribute_refContext):
pass
# Enter a parse tree produced by expressParser#constant_ref.
def enterConstant_ref(self, ctx:expressParser.Constant_refContext):
pass
# Exit a parse tree produced by expressParser#constant_ref.
def exitConstant_ref(self, ctx:expressParser.Constant_refContext):
pass
# Enter a parse tree produced by expressParser#entity_ref.
def enterEntity_ref(self, ctx:expressParser.Entity_refContext):
pass
# Exit a parse tree produced by expressParser#entity_ref.
def exitEntity_ref(self, ctx:expressParser.Entity_refContext):
pass
# Enter a parse tree produced by expressParser#enumeration_ref.
def enterEnumeration_ref(self, ctx:expressParser.Enumeration_refContext):
pass
# Exit a parse tree produced by expressParser#enumeration_ref.
def exitEnumeration_ref(self, ctx:expressParser.Enumeration_refContext):
pass
# Enter a parse tree produced by expressParser#function_ref.
def enterFunction_ref(self, ctx:expressParser.Function_refContext):
pass
# Exit a parse tree produced by expressParser#function_ref.
def exitFunction_ref(self, ctx:expressParser.Function_refContext):
pass
# Enter a parse tree produced by expressParser#parameter_ref.
def enterParameter_ref(self, ctx:expressParser.Parameter_refContext):
pass
# Exit a parse tree produced by expressParser#parameter_ref.
def exitParameter_ref(self, ctx:expressParser.Parameter_refContext):
pass
# Enter a parse tree produced by expressParser#procedure_ref.
def enterProcedure_ref(self, ctx:expressParser.Procedure_refContext):
pass
# Exit a parse tree produced by expressParser#procedure_ref.
def exitProcedure_ref(self, ctx:expressParser.Procedure_refContext):
pass
# Enter a parse tree produced by expressParser#rule_label_ref.
def enterRule_label_ref(self, ctx:expressParser.Rule_label_refContext):
pass
# Exit a parse tree produced by expressParser#rule_label_ref.
def exitRule_label_ref(self, ctx:expressParser.Rule_label_refContext):
pass
# Enter a parse tree produced by expressParser#rule_ref.
def enterRule_ref(self, ctx:expressParser.Rule_refContext):
pass
# Exit a parse tree produced by expressParser#rule_ref.
def exitRule_ref(self, ctx:expressParser.Rule_refContext):
pass
# Enter a parse tree produced by expressParser#schema_ref.
def enterSchema_ref(self, ctx:expressParser.Schema_refContext):
pass
# Exit a parse tree produced by expressParser#schema_ref.
def exitSchema_ref(self, ctx:expressParser.Schema_refContext):
pass
# Enter a parse tree produced by expressParser#subtype_constraint_ref.
def enterSubtype_constraint_ref(self, ctx:expressParser.Subtype_constraint_refContext):
pass
# Exit a parse tree produced by expressParser#subtype_constraint_ref.
def exitSubtype_constraint_ref(self, ctx:expressParser.Subtype_constraint_refContext):
pass
# Enter a parse tree produced by expressParser#type_label_ref.
def enterType_label_ref(self, ctx:expressParser.Type_label_refContext):
pass
# Exit a parse tree produced by expressParser#type_label_ref.
def exitType_label_ref(self, ctx:expressParser.Type_label_refContext):
pass
# Enter a parse tree produced by expressParser#type_ref.
def enterType_ref(self, ctx:expressParser.Type_refContext):
pass
# Exit a parse tree produced by expressParser#type_ref.
def exitType_ref(self, ctx:expressParser.Type_refContext):
pass
# Enter a parse tree produced by expressParser#variable_ref.
def enterVariable_ref(self, ctx:expressParser.Variable_refContext):
pass
# Exit a parse tree produced by expressParser#variable_ref.
def exitVariable_ref(self, ctx:expressParser.Variable_refContext):
pass
# Enter a parse tree produced by expressParser#abstract_entity_declaration.
def enterAbstract_entity_declaration(self, ctx:expressParser.Abstract_entity_declarationContext):
pass
# Exit a parse tree produced by expressParser#abstract_entity_declaration.
def exitAbstract_entity_declaration(self, ctx:expressParser.Abstract_entity_declarationContext):
pass
# Enter a parse tree produced by expressParser#abstract_supertype.
def enterAbstract_supertype(self, ctx:expressParser.Abstract_supertypeContext):
pass
# Exit a parse tree produced by expressParser#abstract_supertype.
def exitAbstract_supertype(self, ctx:expressParser.Abstract_supertypeContext):
pass
# Enter a parse tree produced by expressParser#abstract_supertype_declaration.
def enterAbstract_supertype_declaration(self, ctx:expressParser.Abstract_supertype_declarationContext):
pass
# Exit a parse tree produced by expressParser#abstract_supertype_declaration.
def exitAbstract_supertype_declaration(self, ctx:expressParser.Abstract_supertype_declarationContext):
pass
# Enter a parse tree produced by expressParser#actual_parameter_list.
def enterActual_parameter_list(self, ctx:expressParser.Actual_parameter_listContext):
pass
# Exit a parse tree produced by expressParser#actual_parameter_list.
def exitActual_parameter_list(self, ctx:expressParser.Actual_parameter_listContext):
pass
# Enter a parse tree produced by expressParser#add_like_op.
def enterAdd_like_op(self, ctx:expressParser.Add_like_opContext):
pass
# Exit a parse tree produced by expressParser#add_like_op.
def exitAdd_like_op(self, ctx:expressParser.Add_like_opContext):
pass
# Enter a parse tree produced by expressParser#aggregate_initializer.
def enterAggregate_initializer(self, ctx:expressParser.Aggregate_initializerContext):
pass
# Exit a parse tree produced by expressParser#aggregate_initializer.
def exitAggregate_initializer(self, ctx:expressParser.Aggregate_initializerContext):
pass
# Enter a parse tree produced by expressParser#aggregate_source.
def enterAggregate_source(self, ctx:expressParser.Aggregate_sourceContext):
pass
# Exit a parse tree produced by expressParser#aggregate_source.
def exitAggregate_source(self, ctx:expressParser.Aggregate_sourceContext):
pass
# Enter a parse tree produced by expressParser#aggregate_type.
def enterAggregate_type(self, ctx:expressParser.Aggregate_typeContext):
pass
# Exit a parse tree produced by expressParser#aggregate_type.
def exitAggregate_type(self, ctx:expressParser.Aggregate_typeContext):
pass
# Enter a parse tree produced by expressParser#aggregation_types.
def enterAggregation_types(self, ctx:expressParser.Aggregation_typesContext):
pass
# Exit a parse tree produced by expressParser#aggregation_types.
def exitAggregation_types(self, ctx:expressParser.Aggregation_typesContext):
pass
# Enter a parse tree produced by expressParser#algorithm_head.
def enterAlgorithm_head(self, ctx:expressParser.Algorithm_headContext):
pass
# Exit a parse tree produced by expressParser#algorithm_head.
def exitAlgorithm_head(self, ctx:expressParser.Algorithm_headContext):
pass
# Enter a parse tree produced by expressParser#alias_stmt.
def enterAlias_stmt(self, ctx:expressParser.Alias_stmtContext):
pass
# Exit a parse tree produced by expressParser#alias_stmt.
def exitAlias_stmt(self, ctx:expressParser.Alias_stmtContext):
pass
# Enter a parse tree produced by expressParser#array_type.
def enterArray_type(self, ctx:expressParser.Array_typeContext):
pass
# Exit a parse tree produced by expressParser#array_type.
def exitArray_type(self, ctx:expressParser.Array_typeContext):
pass
# Enter a parse tree produced by expressParser#assignment_stmt.
def enterAssignment_stmt(self, ctx:expressParser.Assignment_stmtContext):
pass
# Exit a parse tree produced by expressParser#assignment_stmt.
def exitAssignment_stmt(self, ctx:expressParser.Assignment_stmtContext):
pass
# Enter a parse tree produced by expressParser#attribute_decl.
def enterAttribute_decl(self, ctx:expressParser.Attribute_declContext):
pass
# Exit a parse tree produced by expressParser#attribute_decl.
def exitAttribute_decl(self, ctx:expressParser.Attribute_declContext):
pass
# Enter a parse tree produced by expressParser#attribute_id.
def enterAttribute_id(self, ctx:expressParser.Attribute_idContext):
pass
# Exit a parse tree produced by expressParser#attribute_id.
def exitAttribute_id(self, ctx:expressParser.Attribute_idContext):
pass
# Enter a parse tree produced by expressParser#attribute_qualifier.
def enterAttribute_qualifier(self, ctx:expressParser.Attribute_qualifierContext):
pass
# Exit a parse tree produced by expressParser#attribute_qualifier.
def exitAttribute_qualifier(self, ctx:expressParser.Attribute_qualifierContext):
pass
# Enter a parse tree produced by expressParser#bag_type.
def enterBag_type(self, ctx:expressParser.Bag_typeContext):
pass
# Exit a parse tree produced by expressParser#bag_type.
def exitBag_type(self, ctx:expressParser.Bag_typeContext):
pass
# Enter a parse tree produced by expressParser#binary_type.
def enterBinary_type(self, ctx:expressParser.Binary_typeContext):
pass
# Exit a parse tree produced by expressParser#binary_type.
def exitBinary_type(self, ctx:expressParser.Binary_typeContext):
pass
# Enter a parse tree produced by expressParser#boolean_type.
def enterBoolean_type(self, ctx:expressParser.Boolean_typeContext):
pass
# Exit a parse tree produced by expressParser#boolean_type.
def exitBoolean_type(self, ctx:expressParser.Boolean_typeContext):
pass
# Enter a parse tree produced by expressParser#bound_1.
def enterBound_1(self, ctx:expressParser.Bound_1Context):
pass
# Exit a parse tree produced by expressParser#bound_1.
def exitBound_1(self, ctx:expressParser.Bound_1Context):
pass
# Enter a parse tree produced by expressParser#bound_2.
def enterBound_2(self, ctx:expressParser.Bound_2Context):
pass
# Exit a parse tree produced by expressParser#bound_2.
def exitBound_2(self, ctx:expressParser.Bound_2Context):
pass
# Enter a parse tree produced by expressParser#bound_spec.
def enterBound_spec(self, ctx:expressParser.Bound_specContext):
pass
# Exit a parse tree produced by expressParser#bound_spec.
def exitBound_spec(self, ctx:expressParser.Bound_specContext):
pass
# Enter a parse tree produced by expressParser#built_in_constant.
def enterBuilt_in_constant(self, ctx:expressParser.Built_in_constantContext):
pass
# Exit a parse tree produced by expressParser#built_in_constant.
def exitBuilt_in_constant(self, ctx:expressParser.Built_in_constantContext):
pass
# Enter a parse tree produced by expressParser#built_in_function.
def enterBuilt_in_function(self, ctx:expressParser.Built_in_functionContext):
pass
# Exit a parse tree produced by expressParser#built_in_function.
def exitBuilt_in_function(self, ctx:expressParser.Built_in_functionContext):
pass
# Enter a parse tree produced by expressParser#built_in_procedure.
def enterBuilt_in_procedure(self, ctx:expressParser.Built_in_procedureContext):
pass
# Exit a parse tree produced by expressParser#built_in_procedure.
def exitBuilt_in_procedure(self, ctx:expressParser.Built_in_procedureContext):
pass
# Enter a parse tree produced by expressParser#case_action.
def enterCase_action(self, ctx:expressParser.Case_actionContext):
pass
# Exit a parse tree produced by expressParser#case_action.
def exitCase_action(self, ctx:expressParser.Case_actionContext):
pass
# Enter a parse tree produced by expressParser#case_label.
def enterCase_label(self, ctx:expressParser.Case_labelContext):
pass
# Exit a parse tree produced by expressParser#case_label.
def exitCase_label(self, ctx:expressParser.Case_labelContext):
pass
# Enter a parse tree produced by expressParser#case_stmt.
def enterCase_stmt(self, ctx:expressParser.Case_stmtContext):
pass
# Exit a parse tree produced by expressParser#case_stmt.
def exitCase_stmt(self, ctx:expressParser.Case_stmtContext):
pass
# Enter a parse tree produced by expressParser#compound_stmt.
def enterCompound_stmt(self, ctx:expressParser.Compound_stmtContext):
pass
# Exit a parse tree produced by expressParser#compound_stmt.
def exitCompound_stmt(self, ctx:expressParser.Compound_stmtContext):
pass
# Enter a parse tree produced by expressParser#concrete_types.
def enterConcrete_types(self, ctx:expressParser.Concrete_typesContext):
pass
# Exit a parse tree produced by expressParser#concrete_types.
def exitConcrete_types(self, ctx:expressParser.Concrete_typesContext):
pass
# Enter a parse tree produced by expressParser#constant_body.
def enterConstant_body(self, ctx:expressParser.Constant_bodyContext):
pass
# Exit a parse tree produced by expressParser#constant_body.
def exitConstant_body(self, ctx:expressParser.Constant_bodyContext):
pass
# Enter a parse tree produced by expressParser#constant_decl.
def enterConstant_decl(self, ctx:expressParser.Constant_declContext):
pass
# Exit a parse tree produced by expressParser#constant_decl.
def exitConstant_decl(self, ctx:expressParser.Constant_declContext):
pass
# Enter a parse tree produced by expressParser#constant_factor.
def enterConstant_factor(self, ctx:expressParser.Constant_factorContext):
pass
# Exit a parse tree produced by expressParser#constant_factor.
def exitConstant_factor(self, ctx:expressParser.Constant_factorContext):
pass
# Enter a parse tree produced by expressParser#constant_id.
def enterConstant_id(self, ctx:expressParser.Constant_idContext):
pass
# Exit a parse tree produced by expressParser#constant_id.
def exitConstant_id(self, ctx:expressParser.Constant_idContext):
pass
# Enter a parse tree produced by expressParser#constructed_types.
def enterConstructed_types(self, ctx:expressParser.Constructed_typesContext):
pass
# Exit a parse tree produced by expressParser#constructed_types.
def exitConstructed_types(self, ctx:expressParser.Constructed_typesContext):
pass
# Enter a parse tree produced by expressParser#declaration.
def enterDeclaration(self, ctx:expressParser.DeclarationContext):
pass
# Exit a parse tree produced by expressParser#declaration.
def exitDeclaration(self, ctx:expressParser.DeclarationContext):
pass
# Enter a parse tree produced by expressParser#derived_attr.
def enterDerived_attr(self, ctx:expressParser.Derived_attrContext):
pass
# Exit a parse tree produced by expressParser#derived_attr.
def exitDerived_attr(self, ctx:expressParser.Derived_attrContext):
pass
# Enter a parse tree produced by expressParser#derive_clause.
def enterDerive_clause(self, ctx:expressParser.Derive_clauseContext):
pass
# Exit a parse tree produced by expressParser#derive_clause.
def exitDerive_clause(self, ctx:expressParser.Derive_clauseContext):
pass
# Enter a parse tree produced by expressParser#domain_rule.
def enterDomain_rule(self, ctx:expressParser.Domain_ruleContext):
pass
# Exit a parse tree produced by expressParser#domain_rule.
def exitDomain_rule(self, ctx:expressParser.Domain_ruleContext):
pass
# Enter a parse tree produced by expressParser#element.
def enterElement(self, ctx:expressParser.ElementContext):
pass
# Exit a parse tree produced by expressParser#element.
def exitElement(self, ctx:expressParser.ElementContext):
pass
# Enter a parse tree produced by expressParser#entity_body.
def enterEntity_body(self, ctx:expressParser.Entity_bodyContext):
pass
# Exit a parse tree produced by expressParser#entity_body.
def exitEntity_body(self, ctx:expressParser.Entity_bodyContext):
pass
# Enter a parse tree produced by expressParser#entity_constructor.
def enterEntity_constructor(self, ctx:expressParser.Entity_constructorContext):
pass
# Exit a parse tree produced by expressParser#entity_constructor.
def exitEntity_constructor(self, ctx:expressParser.Entity_constructorContext):
pass
# Enter a parse tree produced by expressParser#entity_decl.
def enterEntity_decl(self, ctx:expressParser.Entity_declContext):
pass
# Exit a parse tree produced by expressParser#entity_decl.
def exitEntity_decl(self, ctx:expressParser.Entity_declContext):
pass
# Enter a parse tree produced by expressParser#entity_head.
def enterEntity_head(self, ctx:expressParser.Entity_headContext):
pass
# Exit a parse tree produced by expressParser#entity_head.
def exitEntity_head(self, ctx:expressParser.Entity_headContext):
pass
# Enter a parse tree produced by expressParser#entity_id.
def enterEntity_id(self, ctx:expressParser.Entity_idContext):
pass
# Exit a parse tree produced by expressParser#entity_id.
def exitEntity_id(self, ctx:expressParser.Entity_idContext):
pass
# Enter a parse tree produced by expressParser#enumeration_extension.
def enterEnumeration_extension(self, ctx:expressParser.Enumeration_extensionContext):
pass
# Exit a parse tree produced by expressParser#enumeration_extension.
def exitEnumeration_extension(self, ctx:expressParser.Enumeration_extensionContext):
pass
# Enter a parse tree produced by expressParser#enumeration_id.
def enterEnumeration_id(self, ctx:expressParser.Enumeration_idContext):
pass
# Exit a parse tree produced by expressParser#enumeration_id.
def exitEnumeration_id(self, ctx:expressParser.Enumeration_idContext):
pass
# Enter a parse tree produced by expressParser#enumeration_items.
def enterEnumeration_items(self, ctx:expressParser.Enumeration_itemsContext):
pass
# Exit a parse tree produced by expressParser#enumeration_items.
def exitEnumeration_items(self, ctx:expressParser.Enumeration_itemsContext):
pass
# Enter a parse tree produced by expressParser#enumeration_reference.
def enterEnumeration_reference(self, ctx:expressParser.Enumeration_referenceContext):
pass
# Exit a parse tree produced by expressParser#enumeration_reference.
def exitEnumeration_reference(self, ctx:expressParser.Enumeration_referenceContext):
pass
# Enter a parse tree produced by expressParser#enumeration_type.
def enterEnumeration_type(self, ctx:expressParser.Enumeration_typeContext):
pass
# Exit a parse tree produced by expressParser#enumeration_type.
def exitEnumeration_type(self, ctx:expressParser.Enumeration_typeContext):
pass
# Enter a parse tree produced by expressParser#escape_stmt.
def enterEscape_stmt(self, ctx:expressParser.Escape_stmtContext):
pass
# Exit a parse tree produced by expressParser#escape_stmt.
def exitEscape_stmt(self, ctx:expressParser.Escape_stmtContext):
pass
# Enter a parse tree produced by expressParser#explicit_attr.
def enterExplicit_attr(self, ctx:expressParser.Explicit_attrContext):
pass
# Exit a parse tree produced by expressParser#explicit_attr.
def exitExplicit_attr(self, ctx:expressParser.Explicit_attrContext):
pass
# Enter a parse tree produced by expressParser#expression.
def enterExpression(self, ctx:expressParser.ExpressionContext):
pass
# Exit a parse tree produced by expressParser#expression.
def exitExpression(self, ctx:expressParser.ExpressionContext):
pass
# Enter a parse tree produced by expressParser#factor.
def enterFactor(self, ctx:expressParser.FactorContext):
pass
# Exit a parse tree produced by expressParser#factor.
def exitFactor(self, ctx:expressParser.FactorContext):
pass
# Enter a parse tree produced by expressParser#formal_parameter.
def enterFormal_parameter(self, ctx:expressParser.Formal_parameterContext):
pass
# Exit a parse tree produced by expressParser#formal_parameter.
def exitFormal_parameter(self, ctx:expressParser.Formal_parameterContext):
pass
# Enter a parse tree produced by expressParser#function_call.
def enterFunction_call(self, ctx:expressParser.Function_callContext):
pass
# Exit a parse tree produced by expressParser#function_call.
def exitFunction_call(self, ctx:expressParser.Function_callContext):
pass
# Enter a parse tree produced by expressParser#function_decl.
def enterFunction_decl(self, ctx:expressParser.Function_declContext):
pass
# Exit a parse tree produced by expressParser#function_decl.
def exitFunction_decl(self, ctx:expressParser.Function_declContext):
pass
# Enter a parse tree produced by expressParser#function_head.
def enterFunction_head(self, ctx:expressParser.Function_headContext):
pass
# Exit a parse tree produced by expressParser#function_head.
def exitFunction_head(self, ctx:expressParser.Function_headContext):
pass
# Enter a parse tree produced by expressParser#function_id.
def enterFunction_id(self, ctx:expressParser.Function_idContext):
pass
# Exit a parse tree produced by expressParser#function_id.
def exitFunction_id(self, ctx:expressParser.Function_idContext):
pass
# Enter a parse tree produced by expressParser#generalized_types.
def enterGeneralized_types(self, ctx:expressParser.Generalized_typesContext):
pass
# Exit a parse tree produced by expressParser#generalized_types.
def exitGeneralized_types(self, ctx:expressParser.Generalized_typesContext):
pass
# NOTE: auto-generated ANTLR4 listener stubs for the EXPRESS grammar
# (expressParser). Do not edit by hand — regenerate from the .g4 grammar.
# Each rule gets an enter/exit pair that subclasses may override.
# Enter a parse tree produced by expressParser#general_aggregation_types.
def enterGeneral_aggregation_types(self, ctx:expressParser.General_aggregation_typesContext):
    pass
# Exit a parse tree produced by expressParser#general_aggregation_types.
def exitGeneral_aggregation_types(self, ctx:expressParser.General_aggregation_typesContext):
    pass
# Enter a parse tree produced by expressParser#general_array_type.
def enterGeneral_array_type(self, ctx:expressParser.General_array_typeContext):
    pass
# Exit a parse tree produced by expressParser#general_array_type.
def exitGeneral_array_type(self, ctx:expressParser.General_array_typeContext):
    pass
# Enter a parse tree produced by expressParser#general_bag_type.
def enterGeneral_bag_type(self, ctx:expressParser.General_bag_typeContext):
    pass
# Exit a parse tree produced by expressParser#general_bag_type.
def exitGeneral_bag_type(self, ctx:expressParser.General_bag_typeContext):
    pass
# Enter a parse tree produced by expressParser#general_list_type.
def enterGeneral_list_type(self, ctx:expressParser.General_list_typeContext):
    pass
# Exit a parse tree produced by expressParser#general_list_type.
def exitGeneral_list_type(self, ctx:expressParser.General_list_typeContext):
    pass
# Enter a parse tree produced by expressParser#general_ref.
def enterGeneral_ref(self, ctx:expressParser.General_refContext):
    pass
# Exit a parse tree produced by expressParser#general_ref.
def exitGeneral_ref(self, ctx:expressParser.General_refContext):
    pass
# Enter a parse tree produced by expressParser#general_set_type.
def enterGeneral_set_type(self, ctx:expressParser.General_set_typeContext):
    pass
# Exit a parse tree produced by expressParser#general_set_type.
def exitGeneral_set_type(self, ctx:expressParser.General_set_typeContext):
    pass
# Enter a parse tree produced by expressParser#generic_entity_type.
def enterGeneric_entity_type(self, ctx:expressParser.Generic_entity_typeContext):
    pass
# Exit a parse tree produced by expressParser#generic_entity_type.
def exitGeneric_entity_type(self, ctx:expressParser.Generic_entity_typeContext):
    pass
# Enter a parse tree produced by expressParser#generic_type.
def enterGeneric_type(self, ctx:expressParser.Generic_typeContext):
    pass
# Exit a parse tree produced by expressParser#generic_type.
def exitGeneric_type(self, ctx:expressParser.Generic_typeContext):
    pass
# Enter a parse tree produced by expressParser#group_qualifier.
def enterGroup_qualifier(self, ctx:expressParser.Group_qualifierContext):
    pass
# Exit a parse tree produced by expressParser#group_qualifier.
def exitGroup_qualifier(self, ctx:expressParser.Group_qualifierContext):
    pass
# Enter a parse tree produced by expressParser#if_stmt.
def enterIf_stmt(self, ctx:expressParser.If_stmtContext):
    pass
# Exit a parse tree produced by expressParser#if_stmt.
def exitIf_stmt(self, ctx:expressParser.If_stmtContext):
    pass
# Enter a parse tree produced by expressParser#increment.
def enterIncrement(self, ctx:expressParser.IncrementContext):
    pass
# Exit a parse tree produced by expressParser#increment.
def exitIncrement(self, ctx:expressParser.IncrementContext):
    pass
# Enter a parse tree produced by expressParser#increment_control.
def enterIncrement_control(self, ctx:expressParser.Increment_controlContext):
    pass
# Exit a parse tree produced by expressParser#increment_control.
def exitIncrement_control(self, ctx:expressParser.Increment_controlContext):
    pass
# Enter a parse tree produced by expressParser#index.
def enterIndex(self, ctx:expressParser.IndexContext):
    pass
# Exit a parse tree produced by expressParser#index.
def exitIndex(self, ctx:expressParser.IndexContext):
    pass
# Enter a parse tree produced by expressParser#index_1.
def enterIndex_1(self, ctx:expressParser.Index_1Context):
    pass
# Exit a parse tree produced by expressParser#index_1.
def exitIndex_1(self, ctx:expressParser.Index_1Context):
    pass
# Enter a parse tree produced by expressParser#index_2.
def enterIndex_2(self, ctx:expressParser.Index_2Context):
    pass
# Exit a parse tree produced by expressParser#index_2.
def exitIndex_2(self, ctx:expressParser.Index_2Context):
    pass
# Enter a parse tree produced by expressParser#index_qualifier.
def enterIndex_qualifier(self, ctx:expressParser.Index_qualifierContext):
    pass
# Exit a parse tree produced by expressParser#index_qualifier.
def exitIndex_qualifier(self, ctx:expressParser.Index_qualifierContext):
    pass
# Enter a parse tree produced by expressParser#instantiable_type.
def enterInstantiable_type(self, ctx:expressParser.Instantiable_typeContext):
    pass
# Exit a parse tree produced by expressParser#instantiable_type.
def exitInstantiable_type(self, ctx:expressParser.Instantiable_typeContext):
    pass
# Enter a parse tree produced by expressParser#integer_type.
def enterInteger_type(self, ctx:expressParser.Integer_typeContext):
    pass
# Exit a parse tree produced by expressParser#integer_type.
def exitInteger_type(self, ctx:expressParser.Integer_typeContext):
    pass
# Enter a parse tree produced by expressParser#interface_specification.
def enterInterface_specification(self, ctx:expressParser.Interface_specificationContext):
    pass
# Exit a parse tree produced by expressParser#interface_specification.
def exitInterface_specification(self, ctx:expressParser.Interface_specificationContext):
    pass
# Enter a parse tree produced by expressParser#interval.
def enterInterval(self, ctx:expressParser.IntervalContext):
    pass
# Exit a parse tree produced by expressParser#interval.
def exitInterval(self, ctx:expressParser.IntervalContext):
    pass
# Enter a parse tree produced by expressParser#interval_high.
def enterInterval_high(self, ctx:expressParser.Interval_highContext):
    pass
# Exit a parse tree produced by expressParser#interval_high.
def exitInterval_high(self, ctx:expressParser.Interval_highContext):
    pass
# Enter a parse tree produced by expressParser#interval_item.
def enterInterval_item(self, ctx:expressParser.Interval_itemContext):
    pass
# Exit a parse tree produced by expressParser#interval_item.
def exitInterval_item(self, ctx:expressParser.Interval_itemContext):
    pass
# Enter a parse tree produced by expressParser#interval_low.
def enterInterval_low(self, ctx:expressParser.Interval_lowContext):
    pass
# Exit a parse tree produced by expressParser#interval_low.
def exitInterval_low(self, ctx:expressParser.Interval_lowContext):
    pass
# Enter a parse tree produced by expressParser#interval_op.
def enterInterval_op(self, ctx:expressParser.Interval_opContext):
    pass
# Exit a parse tree produced by expressParser#interval_op.
def exitInterval_op(self, ctx:expressParser.Interval_opContext):
    pass
# Enter a parse tree produced by expressParser#inverse_attr.
def enterInverse_attr(self, ctx:expressParser.Inverse_attrContext):
    pass
# Exit a parse tree produced by expressParser#inverse_attr.
def exitInverse_attr(self, ctx:expressParser.Inverse_attrContext):
    pass
# Enter a parse tree produced by expressParser#inverse_clause.
def enterInverse_clause(self, ctx:expressParser.Inverse_clauseContext):
    pass
# Exit a parse tree produced by expressParser#inverse_clause.
def exitInverse_clause(self, ctx:expressParser.Inverse_clauseContext):
    pass
# Enter a parse tree produced by expressParser#list_type.
def enterList_type(self, ctx:expressParser.List_typeContext):
    pass
# Exit a parse tree produced by expressParser#list_type.
def exitList_type(self, ctx:expressParser.List_typeContext):
    pass
# Enter a parse tree produced by expressParser#literal.
def enterLiteral(self, ctx:expressParser.LiteralContext):
    pass
# Exit a parse tree produced by expressParser#literal.
def exitLiteral(self, ctx:expressParser.LiteralContext):
    pass
# Enter a parse tree produced by expressParser#local_decl.
def enterLocal_decl(self, ctx:expressParser.Local_declContext):
    pass
# Exit a parse tree produced by expressParser#local_decl.
def exitLocal_decl(self, ctx:expressParser.Local_declContext):
    pass
# Enter a parse tree produced by expressParser#local_variable.
def enterLocal_variable(self, ctx:expressParser.Local_variableContext):
    pass
# Exit a parse tree produced by expressParser#local_variable.
def exitLocal_variable(self, ctx:expressParser.Local_variableContext):
    pass
# Enter a parse tree produced by expressParser#logical_expression.
def enterLogical_expression(self, ctx:expressParser.Logical_expressionContext):
    pass
# Exit a parse tree produced by expressParser#logical_expression.
def exitLogical_expression(self, ctx:expressParser.Logical_expressionContext):
    pass
# Enter a parse tree produced by expressParser#logical_literal.
def enterLogical_literal(self, ctx:expressParser.Logical_literalContext):
    pass
# Exit a parse tree produced by expressParser#logical_literal.
def exitLogical_literal(self, ctx:expressParser.Logical_literalContext):
    pass
# Enter a parse tree produced by expressParser#logical_type.
def enterLogical_type(self, ctx:expressParser.Logical_typeContext):
    pass
# Exit a parse tree produced by expressParser#logical_type.
def exitLogical_type(self, ctx:expressParser.Logical_typeContext):
    pass
# Enter a parse tree produced by expressParser#multiplication_like_op.
def enterMultiplication_like_op(self, ctx:expressParser.Multiplication_like_opContext):
    pass
# Exit a parse tree produced by expressParser#multiplication_like_op.
def exitMultiplication_like_op(self, ctx:expressParser.Multiplication_like_opContext):
    pass
# Enter a parse tree produced by expressParser#named_types.
def enterNamed_types(self, ctx:expressParser.Named_typesContext):
    pass
# Exit a parse tree produced by expressParser#named_types.
def exitNamed_types(self, ctx:expressParser.Named_typesContext):
    pass
# Enter a parse tree produced by expressParser#named_type_or_rename.
def enterNamed_type_or_rename(self, ctx:expressParser.Named_type_or_renameContext):
    pass
# Exit a parse tree produced by expressParser#named_type_or_rename.
def exitNamed_type_or_rename(self, ctx:expressParser.Named_type_or_renameContext):
    pass
# Enter a parse tree produced by expressParser#null_stmt.
def enterNull_stmt(self, ctx:expressParser.Null_stmtContext):
    pass
# Exit a parse tree produced by expressParser#null_stmt.
def exitNull_stmt(self, ctx:expressParser.Null_stmtContext):
    pass
# Enter a parse tree produced by expressParser#number_type.
def enterNumber_type(self, ctx:expressParser.Number_typeContext):
    pass
# Exit a parse tree produced by expressParser#number_type.
def exitNumber_type(self, ctx:expressParser.Number_typeContext):
    pass
# Enter a parse tree produced by expressParser#numeric_expression.
def enterNumeric_expression(self, ctx:expressParser.Numeric_expressionContext):
    pass
# Exit a parse tree produced by expressParser#numeric_expression.
def exitNumeric_expression(self, ctx:expressParser.Numeric_expressionContext):
    pass
# Enter a parse tree produced by expressParser#one_of.
def enterOne_of(self, ctx:expressParser.One_ofContext):
    pass
# Exit a parse tree produced by expressParser#one_of.
def exitOne_of(self, ctx:expressParser.One_ofContext):
    pass
# Enter a parse tree produced by expressParser#parameter.
def enterParameter(self, ctx:expressParser.ParameterContext):
    pass
# Exit a parse tree produced by expressParser#parameter.
def exitParameter(self, ctx:expressParser.ParameterContext):
    pass
# Enter a parse tree produced by expressParser#parameter_id.
def enterParameter_id(self, ctx:expressParser.Parameter_idContext):
    pass
# Exit a parse tree produced by expressParser#parameter_id.
def exitParameter_id(self, ctx:expressParser.Parameter_idContext):
    pass
# Enter a parse tree produced by expressParser#parameter_type.
def enterParameter_type(self, ctx:expressParser.Parameter_typeContext):
    pass
# Exit a parse tree produced by expressParser#parameter_type.
def exitParameter_type(self, ctx:expressParser.Parameter_typeContext):
    pass
# Enter a parse tree produced by expressParser#population.
def enterPopulation(self, ctx:expressParser.PopulationContext):
    pass
# Exit a parse tree produced by expressParser#population.
def exitPopulation(self, ctx:expressParser.PopulationContext):
    pass
# Enter a parse tree produced by expressParser#precision_spec.
def enterPrecision_spec(self, ctx:expressParser.Precision_specContext):
    pass
# Exit a parse tree produced by expressParser#precision_spec.
def exitPrecision_spec(self, ctx:expressParser.Precision_specContext):
    pass
# Enter a parse tree produced by expressParser#primary.
def enterPrimary(self, ctx:expressParser.PrimaryContext):
    pass
# Exit a parse tree produced by expressParser#primary.
def exitPrimary(self, ctx:expressParser.PrimaryContext):
    pass
# Enter a parse tree produced by expressParser#procedure_call_stmt.
def enterProcedure_call_stmt(self, ctx:expressParser.Procedure_call_stmtContext):
    pass
# Exit a parse tree produced by expressParser#procedure_call_stmt.
def exitProcedure_call_stmt(self, ctx:expressParser.Procedure_call_stmtContext):
    pass
# Enter a parse tree produced by expressParser#procedure_decl.
def enterProcedure_decl(self, ctx:expressParser.Procedure_declContext):
    pass
# Exit a parse tree produced by expressParser#procedure_decl.
def exitProcedure_decl(self, ctx:expressParser.Procedure_declContext):
    pass
# Enter a parse tree produced by expressParser#procedure_head.
def enterProcedure_head(self, ctx:expressParser.Procedure_headContext):
    pass
# Exit a parse tree produced by expressParser#procedure_head.
def exitProcedure_head(self, ctx:expressParser.Procedure_headContext):
    pass
# Enter a parse tree produced by expressParser#procedure_id.
def enterProcedure_id(self, ctx:expressParser.Procedure_idContext):
    pass
# Exit a parse tree produced by expressParser#procedure_id.
def exitProcedure_id(self, ctx:expressParser.Procedure_idContext):
    pass
# Enter a parse tree produced by expressParser#qualifiable_factor.
def enterQualifiable_factor(self, ctx:expressParser.Qualifiable_factorContext):
    pass
# Exit a parse tree produced by expressParser#qualifiable_factor.
def exitQualifiable_factor(self, ctx:expressParser.Qualifiable_factorContext):
    pass
# Enter a parse tree produced by expressParser#qualified_attribute.
def enterQualified_attribute(self, ctx:expressParser.Qualified_attributeContext):
    pass
# Exit a parse tree produced by expressParser#qualified_attribute.
def exitQualified_attribute(self, ctx:expressParser.Qualified_attributeContext):
    pass
# Enter a parse tree produced by expressParser#qualifier.
def enterQualifier(self, ctx:expressParser.QualifierContext):
    pass
# Exit a parse tree produced by expressParser#qualifier.
def exitQualifier(self, ctx:expressParser.QualifierContext):
    pass
# Enter a parse tree produced by expressParser#query_expression.
def enterQuery_expression(self, ctx:expressParser.Query_expressionContext):
    pass
# Exit a parse tree produced by expressParser#query_expression.
def exitQuery_expression(self, ctx:expressParser.Query_expressionContext):
    pass
# Enter a parse tree produced by expressParser#real_type.
def enterReal_type(self, ctx:expressParser.Real_typeContext):
    pass
# Exit a parse tree produced by expressParser#real_type.
def exitReal_type(self, ctx:expressParser.Real_typeContext):
    pass
# Enter a parse tree produced by expressParser#redeclared_attribute.
def enterRedeclared_attribute(self, ctx:expressParser.Redeclared_attributeContext):
    pass
# Exit a parse tree produced by expressParser#redeclared_attribute.
def exitRedeclared_attribute(self, ctx:expressParser.Redeclared_attributeContext):
    pass
# Enter a parse tree produced by expressParser#referenced_attribute.
def enterReferenced_attribute(self, ctx:expressParser.Referenced_attributeContext):
    pass
# Exit a parse tree produced by expressParser#referenced_attribute.
def exitReferenced_attribute(self, ctx:expressParser.Referenced_attributeContext):
    pass
# Enter a parse tree produced by expressParser#reference_clause.
def enterReference_clause(self, ctx:expressParser.Reference_clauseContext):
    pass
# Exit a parse tree produced by expressParser#reference_clause.
def exitReference_clause(self, ctx:expressParser.Reference_clauseContext):
    pass
# Enter a parse tree produced by expressParser#rel_op.
def enterRel_op(self, ctx:expressParser.Rel_opContext):
    pass
# Exit a parse tree produced by expressParser#rel_op.
def exitRel_op(self, ctx:expressParser.Rel_opContext):
    pass
# Enter a parse tree produced by expressParser#rel_op_extended.
def enterRel_op_extended(self, ctx:expressParser.Rel_op_extendedContext):
    pass
# Exit a parse tree produced by expressParser#rel_op_extended.
def exitRel_op_extended(self, ctx:expressParser.Rel_op_extendedContext):
    pass
# Enter a parse tree produced by expressParser#rename_id.
def enterRename_id(self, ctx:expressParser.Rename_idContext):
    pass
# Exit a parse tree produced by expressParser#rename_id.
def exitRename_id(self, ctx:expressParser.Rename_idContext):
    pass
# Enter a parse tree produced by expressParser#repeat_control.
def enterRepeat_control(self, ctx:expressParser.Repeat_controlContext):
    pass
# Exit a parse tree produced by expressParser#repeat_control.
def exitRepeat_control(self, ctx:expressParser.Repeat_controlContext):
    pass
# Enter a parse tree produced by expressParser#repeat_stmt.
def enterRepeat_stmt(self, ctx:expressParser.Repeat_stmtContext):
    pass
# Exit a parse tree produced by expressParser#repeat_stmt.
def exitRepeat_stmt(self, ctx:expressParser.Repeat_stmtContext):
    pass
# Enter a parse tree produced by expressParser#repetition.
def enterRepetition(self, ctx:expressParser.RepetitionContext):
    pass
# Exit a parse tree produced by expressParser#repetition.
def exitRepetition(self, ctx:expressParser.RepetitionContext):
    pass
# Enter a parse tree produced by expressParser#resource_or_rename.
def enterResource_or_rename(self, ctx:expressParser.Resource_or_renameContext):
    pass
# Exit a parse tree produced by expressParser#resource_or_rename.
def exitResource_or_rename(self, ctx:expressParser.Resource_or_renameContext):
    pass
# Enter a parse tree produced by expressParser#resource_ref.
def enterResource_ref(self, ctx:expressParser.Resource_refContext):
    pass
# Exit a parse tree produced by expressParser#resource_ref.
def exitResource_ref(self, ctx:expressParser.Resource_refContext):
    pass
# Enter a parse tree produced by expressParser#return_stmt.
def enterReturn_stmt(self, ctx:expressParser.Return_stmtContext):
    pass
# Exit a parse tree produced by expressParser#return_stmt.
def exitReturn_stmt(self, ctx:expressParser.Return_stmtContext):
    pass
# Enter a parse tree produced by expressParser#rule_decl.
def enterRule_decl(self, ctx:expressParser.Rule_declContext):
    pass
# Exit a parse tree produced by expressParser#rule_decl.
def exitRule_decl(self, ctx:expressParser.Rule_declContext):
    pass
# Enter a parse tree produced by expressParser#rule_head.
def enterRule_head(self, ctx:expressParser.Rule_headContext):
    pass
# Exit a parse tree produced by expressParser#rule_head.
def exitRule_head(self, ctx:expressParser.Rule_headContext):
    pass
# Enter a parse tree produced by expressParser#rule_id.
def enterRule_id(self, ctx:expressParser.Rule_idContext):
    pass
# Exit a parse tree produced by expressParser#rule_id.
def exitRule_id(self, ctx:expressParser.Rule_idContext):
    pass
# Enter a parse tree produced by expressParser#rule_label_id.
def enterRule_label_id(self, ctx:expressParser.Rule_label_idContext):
    pass
# Exit a parse tree produced by expressParser#rule_label_id.
def exitRule_label_id(self, ctx:expressParser.Rule_label_idContext):
    pass
# Enter a parse tree produced by expressParser#schema_body.
def enterSchema_body(self, ctx:expressParser.Schema_bodyContext):
    pass
# Exit a parse tree produced by expressParser#schema_body.
def exitSchema_body(self, ctx:expressParser.Schema_bodyContext):
    pass
# Enter a parse tree produced by expressParser#schema_decl.
def enterSchema_decl(self, ctx:expressParser.Schema_declContext):
    pass
# Exit a parse tree produced by expressParser#schema_decl.
def exitSchema_decl(self, ctx:expressParser.Schema_declContext):
    pass
# Enter a parse tree produced by expressParser#schema_id.
def enterSchema_id(self, ctx:expressParser.Schema_idContext):
    pass
# Exit a parse tree produced by expressParser#schema_id.
def exitSchema_id(self, ctx:expressParser.Schema_idContext):
    pass
# Enter a parse tree produced by expressParser#schema_version_id.
def enterSchema_version_id(self, ctx:expressParser.Schema_version_idContext):
    pass
# Exit a parse tree produced by expressParser#schema_version_id.
def exitSchema_version_id(self, ctx:expressParser.Schema_version_idContext):
    pass
# Enter a parse tree produced by expressParser#selector.
def enterSelector(self, ctx:expressParser.SelectorContext):
    pass
# Exit a parse tree produced by expressParser#selector.
def exitSelector(self, ctx:expressParser.SelectorContext):
    pass
# Enter a parse tree produced by expressParser#select_extension.
def enterSelect_extension(self, ctx:expressParser.Select_extensionContext):
    pass
# Exit a parse tree produced by expressParser#select_extension.
def exitSelect_extension(self, ctx:expressParser.Select_extensionContext):
    pass
# Enter a parse tree produced by expressParser#select_list.
def enterSelect_list(self, ctx:expressParser.Select_listContext):
    pass
# Exit a parse tree produced by expressParser#select_list.
def exitSelect_list(self, ctx:expressParser.Select_listContext):
    pass
# Enter a parse tree produced by expressParser#select_type.
def enterSelect_type(self, ctx:expressParser.Select_typeContext):
    pass
# Exit a parse tree produced by expressParser#select_type.
def exitSelect_type(self, ctx:expressParser.Select_typeContext):
    pass
# Enter a parse tree produced by expressParser#set_type.
def enterSet_type(self, ctx:expressParser.Set_typeContext):
    pass
# Exit a parse tree produced by expressParser#set_type.
def exitSet_type(self, ctx:expressParser.Set_typeContext):
    pass
# Enter a parse tree produced by expressParser#simple_expression.
def enterSimple_expression(self, ctx:expressParser.Simple_expressionContext):
    pass
# Exit a parse tree produced by expressParser#simple_expression.
def exitSimple_expression(self, ctx:expressParser.Simple_expressionContext):
    pass
# Enter a parse tree produced by expressParser#simple_factor.
def enterSimple_factor(self, ctx:expressParser.Simple_factorContext):
    pass
# Exit a parse tree produced by expressParser#simple_factor.
def exitSimple_factor(self, ctx:expressParser.Simple_factorContext):
    pass
# Enter a parse tree produced by expressParser#simple_types.
def enterSimple_types(self, ctx:expressParser.Simple_typesContext):
    pass
# Exit a parse tree produced by expressParser#simple_types.
def exitSimple_types(self, ctx:expressParser.Simple_typesContext):
    pass
# Enter a parse tree produced by expressParser#skip_stmt.
def enterSkip_stmt(self, ctx:expressParser.Skip_stmtContext):
    pass
# Exit a parse tree produced by expressParser#skip_stmt.
def exitSkip_stmt(self, ctx:expressParser.Skip_stmtContext):
    pass
# Enter a parse tree produced by expressParser#stmt.
def enterStmt(self, ctx:expressParser.StmtContext):
    pass
# Exit a parse tree produced by expressParser#stmt.
def exitStmt(self, ctx:expressParser.StmtContext):
    pass
# Enter a parse tree produced by expressParser#string_literal.
def enterString_literal(self, ctx:expressParser.String_literalContext):
    pass
# Exit a parse tree produced by expressParser#string_literal.
def exitString_literal(self, ctx:expressParser.String_literalContext):
    pass
# Enter a parse tree produced by expressParser#string_type.
def enterString_type(self, ctx:expressParser.String_typeContext):
    pass
# Exit a parse tree produced by expressParser#string_type.
def exitString_type(self, ctx:expressParser.String_typeContext):
    pass
# Enter a parse tree produced by expressParser#subsuper.
def enterSubsuper(self, ctx:expressParser.SubsuperContext):
    pass
# Exit a parse tree produced by expressParser#subsuper.
def exitSubsuper(self, ctx:expressParser.SubsuperContext):
    pass
# Enter a parse tree produced by expressParser#subtype_constraint.
def enterSubtype_constraint(self, ctx:expressParser.Subtype_constraintContext):
    pass
# Exit a parse tree produced by expressParser#subtype_constraint.
def exitSubtype_constraint(self, ctx:expressParser.Subtype_constraintContext):
    pass
# Enter a parse tree produced by expressParser#subtype_constraint_body.
def enterSubtype_constraint_body(self, ctx:expressParser.Subtype_constraint_bodyContext):
    pass
# Exit a parse tree produced by expressParser#subtype_constraint_body.
def exitSubtype_constraint_body(self, ctx:expressParser.Subtype_constraint_bodyContext):
    pass
# Enter a parse tree produced by expressParser#subtype_constraint_decl.
def enterSubtype_constraint_decl(self, ctx:expressParser.Subtype_constraint_declContext):
    pass
# Exit a parse tree produced by expressParser#subtype_constraint_decl.
def exitSubtype_constraint_decl(self, ctx:expressParser.Subtype_constraint_declContext):
    pass
# Enter a parse tree produced by expressParser#subtype_constraint_head.
def enterSubtype_constraint_head(self, ctx:expressParser.Subtype_constraint_headContext):
    pass
# Exit a parse tree produced by expressParser#subtype_constraint_head.
def exitSubtype_constraint_head(self, ctx:expressParser.Subtype_constraint_headContext):
    pass
# Enter a parse tree produced by expressParser#subtype_constraint_id.
def enterSubtype_constraint_id(self, ctx:expressParser.Subtype_constraint_idContext):
    pass
# Exit a parse tree produced by expressParser#subtype_constraint_id.
def exitSubtype_constraint_id(self, ctx:expressParser.Subtype_constraint_idContext):
    pass
# Enter a parse tree produced by expressParser#subtype_declaration.
def enterSubtype_declaration(self, ctx:expressParser.Subtype_declarationContext):
    pass
# Exit a parse tree produced by expressParser#subtype_declaration.
def exitSubtype_declaration(self, ctx:expressParser.Subtype_declarationContext):
    pass
# Enter a parse tree produced by expressParser#supertype_constraint.
def enterSupertype_constraint(self, ctx:expressParser.Supertype_constraintContext):
    pass
# Exit a parse tree produced by expressParser#supertype_constraint.
def exitSupertype_constraint(self, ctx:expressParser.Supertype_constraintContext):
    pass
# Enter a parse tree produced by expressParser#supertype_expression.
def enterSupertype_expression(self, ctx:expressParser.Supertype_expressionContext):
    pass
# Exit a parse tree produced by expressParser#supertype_expression.
def exitSupertype_expression(self, ctx:expressParser.Supertype_expressionContext):
    pass
# Enter a parse tree produced by expressParser#supertype_factor.
def enterSupertype_factor(self, ctx:expressParser.Supertype_factorContext):
    pass
# Exit a parse tree produced by expressParser#supertype_factor.
def exitSupertype_factor(self, ctx:expressParser.Supertype_factorContext):
    pass
# Enter a parse tree produced by expressParser#supertype_rule.
def enterSupertype_rule(self, ctx:expressParser.Supertype_ruleContext):
    pass
# Exit a parse tree produced by expressParser#supertype_rule.
def exitSupertype_rule(self, ctx:expressParser.Supertype_ruleContext):
    pass
# Enter a parse tree produced by expressParser#supertype_term.
def enterSupertype_term(self, ctx:expressParser.Supertype_termContext):
    pass
# Exit a parse tree produced by expressParser#supertype_term.
def exitSupertype_term(self, ctx:expressParser.Supertype_termContext):
    pass
# Enter a parse tree produced by expressParser#syntax.
def enterSyntax(self, ctx:expressParser.SyntaxContext):
    pass
# Exit a parse tree produced by expressParser#syntax.
def exitSyntax(self, ctx:expressParser.SyntaxContext):
    pass
# Enter a parse tree produced by expressParser#term.
def enterTerm(self, ctx:expressParser.TermContext):
    pass
# Exit a parse tree produced by expressParser#term.
def exitTerm(self, ctx:expressParser.TermContext):
    pass
# Enter a parse tree produced by expressParser#total_over.
def enterTotal_over(self, ctx:expressParser.Total_overContext):
    pass
# Exit a parse tree produced by expressParser#total_over.
def exitTotal_over(self, ctx:expressParser.Total_overContext):
    pass
# Enter a parse tree produced by expressParser#type_decl.
def enterType_decl(self, ctx:expressParser.Type_declContext):
    pass
# Exit a parse tree produced by expressParser#type_decl.
def exitType_decl(self, ctx:expressParser.Type_declContext):
    pass
# Enter a parse tree produced by expressParser#type_id.
def enterType_id(self, ctx:expressParser.Type_idContext):
    pass
# Exit a parse tree produced by expressParser#type_id.
def exitType_id(self, ctx:expressParser.Type_idContext):
    pass
# Enter a parse tree produced by expressParser#type_label.
def enterType_label(self, ctx:expressParser.Type_labelContext):
    pass
# Exit a parse tree produced by expressParser#type_label.
def exitType_label(self, ctx:expressParser.Type_labelContext):
    pass
# Enter a parse tree produced by expressParser#type_label_id.
def enterType_label_id(self, ctx:expressParser.Type_label_idContext):
    pass
# Exit a parse tree produced by expressParser#type_label_id.
def exitType_label_id(self, ctx:expressParser.Type_label_idContext):
    pass
# Enter a parse tree produced by expressParser#unary_op.
def enterUnary_op(self, ctx:expressParser.Unary_opContext):
    pass
# Exit a parse tree produced by expressParser#unary_op.
def exitUnary_op(self, ctx:expressParser.Unary_opContext):
    pass
# Enter a parse tree produced by expressParser#underlying_type.
def enterUnderlying_type(self, ctx:expressParser.Underlying_typeContext):
    pass
# Exit a parse tree produced by expressParser#underlying_type.
def exitUnderlying_type(self, ctx:expressParser.Underlying_typeContext):
    pass
# Enter a parse tree produced by expressParser#unique_clause.
def enterUnique_clause(self, ctx:expressParser.Unique_clauseContext):
    pass
# Exit a parse tree produced by expressParser#unique_clause.
def exitUnique_clause(self, ctx:expressParser.Unique_clauseContext):
    pass
# Enter a parse tree produced by expressParser#unique_rule.
def enterUnique_rule(self, ctx:expressParser.Unique_ruleContext):
    pass
# Exit a parse tree produced by expressParser#unique_rule.
def exitUnique_rule(self, ctx:expressParser.Unique_ruleContext):
    pass
# Enter a parse tree produced by expressParser#until_control.
def enterUntil_control(self, ctx:expressParser.Until_controlContext):
    pass
# Exit a parse tree produced by expressParser#until_control.
def exitUntil_control(self, ctx:expressParser.Until_controlContext):
    pass
# Enter a parse tree produced by expressParser#use_clause.
def enterUse_clause(self, ctx:expressParser.Use_clauseContext):
    pass
# Exit a parse tree produced by expressParser#use_clause.
def exitUse_clause(self, ctx:expressParser.Use_clauseContext):
    pass
# Enter a parse tree produced by expressParser#variable_id.
def enterVariable_id(self, ctx:expressParser.Variable_idContext):
    pass
# Exit a parse tree produced by expressParser#variable_id.
def exitVariable_id(self, ctx:expressParser.Variable_idContext):
    pass
# Enter a parse tree produced by expressParser#where_clause.
def enterWhere_clause(self, ctx:expressParser.Where_clauseContext):
    pass
# Exit a parse tree produced by expressParser#where_clause.
def exitWhere_clause(self, ctx:expressParser.Where_clauseContext):
    pass
# Enter a parse tree produced by expressParser#while_control.
def enterWhile_control(self, ctx:expressParser.While_controlContext):
    pass
# Exit a parse tree produced by expressParser#while_control.
def exitWhile_control(self, ctx:expressParser.While_controlContext):
    pass
# Enter a parse tree produced by expressParser#width.
def enterWidth(self, ctx:expressParser.WidthContext):
    pass
# Exit a parse tree produced by expressParser#width.
def exitWidth(self, ctx:expressParser.WidthContext):
    pass
# Enter a parse tree produced by expressParser#width_spec.
def enterWidth_spec(self, ctx:expressParser.Width_specContext):
    pass
# Exit a parse tree produced by expressParser#width_spec.
def exitWidth_spec(self, ctx:expressParser.Width_specContext):
    pass
| 34.212717
| 107
| 0.75022
| 6,986
| 59,188
| 6.182365
| 0.061122
| 0.053207
| 0.088678
| 0.15962
| 0.893401
| 0.868395
| 0.867631
| 0.647048
| 0.63084
| 0.135263
| 0
| 0.000606
| 0.191323
| 59,188
| 1,729
| 108
| 34.232504
| 0.901742
| 0.381226
| 0
| 0.496104
| 1
| 0
| 0.000028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.496104
| false
| 0.496104
| 0.003896
| 0
| 0.501299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
5f8fb83da3e5a0e37a03fc684620033443eee284
| 13,085
|
py
|
Python
|
examples/modeling/modeling_bert.py
|
bcmi220/ggdp
|
ccd1d25a951c4f3549de5f4701b56746cb3495a2
|
[
"Apache-2.0"
] | 9
|
2019-12-03T16:47:20.000Z
|
2021-07-08T08:58:58.000Z
|
examples/modeling/modeling_bert.py
|
bcmi220/ggdp
|
ccd1d25a951c4f3549de5f4701b56746cb3495a2
|
[
"Apache-2.0"
] | 4
|
2021-02-22T16:43:41.000Z
|
2021-04-13T13:39:47.000Z
|
examples/modeling/modeling_bert.py
|
bcmi220/ggdp
|
ccd1d25a951c4f3549de5f4701b56746cb3495a2
|
[
"Apache-2.0"
] | 5
|
2021-02-22T12:14:16.000Z
|
2021-07-08T08:58:59.000Z
|
import logging
import math
import os
import sys
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from transformers import (BertPreTrainedModel, BertModel)
from .utils_modeling import (BiAAttention, BiLinear)
# Module-level logger, keyed to this module's import path (standard
# transformers/examples convention).
logger = logging.getLogger(__name__)
class BertForDependencyParsing(BertPreTrainedModel):
def __init__(self, config):
    """Build a BERT-based dependency parser with a biaffine arc scorer
    (``BiAAttention(..., biaffine=True)``) and a bilinear label
    classifier (``BiLinear``).

    Args:
        config: model configuration; must expose ``use_postag``,
            ``num_postags``, ``num_labels``, ``hidden_size``,
            ``hidden_dropout_prob``, ``arc_space`` and ``label_space``
            (all read below).
    """
    super(BertForDependencyParsing, self).__init__(config)
    self.use_postag = config.use_postag
    if self.use_postag:
        # Integrate the POS-tag embedding additively (translated from the
        # original Chinese comment); index 0 is reserved as the padding
        # POS-tag position, so its embedding stays zero.
        self.postag_embeddings = nn.Embedding(config.num_postags, config.hidden_size, padding_idx=0)
    self.num_labels = config.num_labels
    self.bert = BertModel(config)
    self.dropout = nn.Dropout(config.hidden_dropout_prob)
    # Separate "head" / "child" projections feeding the biaffine arc scorer.
    self.arc_h = nn.Linear(config.hidden_size, config.arc_space)
    self.arc_c = nn.Linear(config.hidden_size, config.arc_space)
    self.attention = BiAAttention(config.arc_space, config.arc_space, 1, biaffine=True)
    # Separate "head" / "child" projections feeding the label classifier.
    self.label_h = nn.Linear(config.hidden_size, config.label_space)
    self.label_c = nn.Linear(config.hidden_size, config.label_space)
    self.bilinear = BiLinear(config.label_space, config.label_space, self.num_labels)
    # Standard transformers weight initialization over all registered modules.
    self.init_weights()
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, inputs_embeds=None,
postag_ids=None, head_ids=None, label_ids=None):
# 1. 编码
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
# output size [batch, length, arc_space]
arc_h = F.elu(self.arc_h(sequence_output))
arc_c = F.elu(self.arc_c(sequence_output))
# output size [batch, length, label_space]
label_h = F.elu(self.label_h(sequence_output))
label_c = F.elu(self.label_c(sequence_output))
# apply dropout
# [batch, length, dim] --> [batch, 2 * length, dim]
arcs = torch.cat([arc_h, arc_c], dim=1)
labels = torch.cat([label_h, label_c], dim=1)
arcs = self.dropout(arcs)
arc_h, arc_c = arcs.chunk(2, 1)
labels = self.dropout(labels)
label_h, label_c = labels.chunk(2, 1)
label_h = label_h.contiguous()
label_c = label_c.contiguous()
# [batch, length, length]
out_arc = self.attention(arc_h, arc_c, mask_d=attention_mask, mask_e=attention_mask).squeeze(dim=1)
batch, max_len, label_space = label_h.size()
if head_ids is not None and label_ids is not None:
# create batch index [batch]
batch_index = torch.arange(0, batch).type_as(out_arc).long()
# get vector for head_ids [batch, length, label_space],
label_h = label_h[batch_index, head_ids.t()].transpose(0, 1).contiguous()
# compute output for type [batch, length, num_labels]
out_label = self.bilinear(label_h, label_c)
# mask invalid position to -inf for log_softmax
if attention_mask is not None:
minus_inf = -1e8
minus_mask = (1 - attention_mask) * minus_inf
out_arc = out_arc + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
# loss_arc shape [batch, length, length]
loss_arc = F.log_softmax(out_arc, dim=1)
# loss_label shape [batch, length, num_labels]
loss_label = F.log_softmax(out_label, dim=2)
# mask invalid position to 0 for sum loss
if attention_mask is not None:
loss_arc = loss_arc * attention_mask.unsqueeze(2) * attention_mask.unsqueeze(1)
loss_label = loss_label * attention_mask.unsqueeze(2)
# number of valid positions which contribute to loss (remove the symbolic head for each sentence.
num = attention_mask.sum() - batch
else:
# number of valid positions which contribute to loss (remove the symbolic head for each sentence.
num = float(max_len - 1) * batch
# first create index matrix [length, batch]
child_index = torch.arange(0, max_len).view(max_len, 1).expand(max_len, batch)
child_index = child_index.type_as(out_arc).long()
# [length-1, batch]
loss_arc = loss_arc[batch_index, head_ids.t(), child_index][1:]
loss_label = loss_label[batch_index, child_index, label_ids.t()][1:]
total_loss = (-loss_arc.sum() / num) + (-loss_label.sum() / num)
outputs = (total_loss, )
else:
label_h = label_h.unsqueeze(2).expand(batch, max_len, max_len, label_space).contiguous()
label_c = label_c.unsqueeze(1).expand(batch, max_len, max_len, label_space).contiguous()
# compute output for label [batch, length, length, num_labels]
out_label = self.bilinear(label_h, label_c)
# mask invalid position to -inf for log_softmax
if attention_mask is not None:
minus_inf = -1e8
minus_mask = (1 - attention_mask) * minus_inf
out_arc = out_arc + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
# logits_arc shape [batch, length, length]
logits_arc = F.log_softmax(out_arc, dim=1)
# logits_label shape [batch, num_labels, length, length]
logits_label = F.log_softmax(out_label, dim=3).permute(0, 3, 1, 2)
# [batch, num_labels, length, length]
energy = torch.exp(logits_arc.unsqueeze(1) + logits_label)
outputs = (energy, logits_arc, logits_label, )
return outputs
class BertForDependencyParsingWithOrder(BertPreTrainedModel):
    """Deep-biaffine dependency parser with an auxiliary parsing-order head.

    Identical to ``BertForDependencyParsing`` (biaffine arc scores plus
    bilinear label scores) with an extra per-token classifier that
    predicts a parsing order in ``[0, max_parsing_order)``; its
    cross-entropy loss is added to the arc/label loss.
    """

    def __init__(self, config):
        super(BertForDependencyParsingWithOrder, self).__init__(config)
        self.use_postag = config.use_postag
        if self.use_postag:
            # Integrate the POS-tag embedding additively;
            # index 0 is used as the padding postag position.
            # NOTE(review): this embedding is never applied in forward()
            # below - confirm whether POS-tag integration is still wired up.
            self.postag_embeddings = nn.Embedding(config.num_postags, config.hidden_size, padding_idx=0)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # separate projections for the head role (h) and the child role (c)
        self.arc_h = nn.Linear(config.hidden_size, config.arc_space)
        self.arc_c = nn.Linear(config.hidden_size, config.arc_space)
        self.attention = BiAAttention(config.arc_space, config.arc_space, 1, biaffine=True)
        self.label_h = nn.Linear(config.hidden_size, config.label_space)
        self.label_c = nn.Linear(config.hidden_size, config.label_space)
        self.bilinear = BiLinear(config.label_space, config.label_space, self.num_labels)
        # auxiliary parsing-order classification head
        self.max_parsing_order = config.max_parsing_order
        self.order_hidden = nn.Linear(config.hidden_size, config.order_space)
        self.order_classifier = nn.Linear(config.order_space, self.max_parsing_order)
        self.init_weights()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None, inputs_embeds=None,
                postag_ids=None, order_ids=None, head_ids=None, label_ids=None):
        """Run the parser.

        Returns ``(loss,)`` when gold ``order_ids``, ``head_ids`` and
        ``label_ids`` are all supplied, otherwise
        ``(energy, logits_arc, logits_label, logits_order)``.
        ``postag_ids`` is currently unused (see NOTE in __init__).
        """
        # 1. Encode the input with BERT.
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask,
                            inputs_embeds=inputs_embeds)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # output size [batch, length, arc_space]
        arc_h = F.elu(self.arc_h(sequence_output))
        arc_c = F.elu(self.arc_c(sequence_output))
        # output size [batch, length, label_space]
        label_h = F.elu(self.label_h(sequence_output))
        label_c = F.elu(self.label_c(sequence_output))
        # output size [batch, length, order_space]
        order_h = F.elu(self.order_hidden(sequence_output))
        # apply dropout: concatenate so head/child share one dropout draw
        # [batch, length, dim] --> [batch, 2 * length, dim]
        arcs = torch.cat([arc_h, arc_c], dim=1)
        labels = torch.cat([label_h, label_c], dim=1)
        arcs = self.dropout(arcs)
        arc_h, arc_c = arcs.chunk(2, 1)
        labels = self.dropout(labels)
        label_h, label_c = labels.chunk(2, 1)
        label_h = label_h.contiguous()
        label_c = label_c.contiguous()
        order_h = self.dropout(order_h)
        # arc scores for every (head, child) pair: [batch, length, length]
        out_arc = self.attention(arc_h, arc_c, mask_d=attention_mask, mask_e=attention_mask).squeeze(dim=1)
        # [batch, length, max_parsing_order]
        logits_order = self.order_classifier(order_h)
        batch, max_len, label_space = label_h.size()
        if order_ids is not None and head_ids is not None and label_ids is not None:
            # create batch index [batch]
            batch_index = torch.arange(0, batch).type_as(out_arc).long()
            # gather the gold-head vector for each child position
            # get vector for head_ids [batch, length, label_space],
            label_h = label_h[batch_index, head_ids.t()].transpose(0, 1).contiguous()
            # compute output for type [batch, length, num_labels]
            out_label = self.bilinear(label_h, label_c)
            # mask invalid position to -inf for log_softmax
            if attention_mask is not None:
                minus_inf = -1e8
                minus_mask = (1 - attention_mask) * minus_inf
                out_arc = out_arc + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
            # loss_arc shape [batch, length, length]
            loss_arc = F.log_softmax(out_arc, dim=1)
            # loss_label shape [batch, length, num_labels]
            loss_label = F.log_softmax(out_label, dim=2)
            # mask invalid position to 0 for sum loss
            if attention_mask is not None:
                loss_arc = loss_arc * attention_mask.unsqueeze(2) * attention_mask.unsqueeze(1)
                loss_label = loss_label * attention_mask.unsqueeze(2)
                # number of valid positions which contribute to loss
                # (remove the symbolic head for each sentence)
                num = attention_mask.sum() - batch
            else:
                # number of valid positions which contribute to loss
                # (remove the symbolic head for each sentence)
                num = float(max_len - 1) * batch
            # first create index matrix [length, batch]
            child_index = torch.arange(0, max_len).view(max_len, 1).expand(max_len, batch)
            child_index = child_index.type_as(out_arc).long()
            # pick gold-arc / gold-label log-probs; [1:] drops the root token
            # [length-1, batch]
            loss_arc = loss_arc[batch_index, head_ids.t(), child_index][1:]
            loss_label = loss_label[batch_index, child_index, label_ids.t()][1:]
            # auxiliary parsing-order loss (summed, normalised by num below)
            loss_fct = CrossEntropyLoss(reduction="sum")
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits_order = logits_order.view(-1, self.max_parsing_order)[active_loss]
                active_order_ids = order_ids.view(-1)[active_loss]
                loss_order = loss_fct(active_logits_order, active_order_ids)
            else:
                loss_order = loss_fct(logits_order.view(-1, self.max_parsing_order), order_ids.view(-1))
            total_loss = (-loss_arc.sum() / num) + (-loss_label.sum() / num) + (loss_order / num)
            outputs = (total_loss, )
        else:
            # decoding: score every possible (head, child) pair
            label_h = label_h.unsqueeze(2).expand(batch, max_len, max_len, label_space).contiguous()
            label_c = label_c.unsqueeze(1).expand(batch, max_len, max_len, label_space).contiguous()
            # compute output for label [batch, length, length, num_labels]
            out_label = self.bilinear(label_h, label_c)
            # mask invalid position to -inf for log_softmax
            if attention_mask is not None:
                minus_inf = -1e8
                minus_mask = (1 - attention_mask) * minus_inf
                out_arc = out_arc + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
            # logits_arc shape [batch, length, length]
            logits_arc = F.log_softmax(out_arc, dim=1)
            # logits_label shape [batch, num_labels, length, length]
            logits_label = F.log_softmax(out_label, dim=3).permute(0, 3, 1, 2)
            # joint arc+label probability consumed by the decoder
            # [batch, num_labels, length, length]
            energy = torch.exp(logits_arc.unsqueeze(1) + logits_label)
            outputs = (energy, logits_arc, logits_label, logits_order)
        return outputs
| 43.762542
| 113
| 0.62606
| 1,708
| 13,085
| 4.525176
| 0.086651
| 0.050459
| 0.019925
| 0.023289
| 0.884202
| 0.882262
| 0.867771
| 0.861431
| 0.852374
| 0.844482
| 0
| 0.01061
| 0.27971
| 13,085
| 298
| 114
| 43.909396
| 0.809443
| 0.163011
| 0
| 0.80226
| 0
| 0
| 0.000275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022599
| false
| 0
| 0.056497
| 0
| 0.101695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5fb158ca13e88dfb2784f05f9745a04b8fb21b2a
| 44,926
|
py
|
Python
|
axisafe/elements.py
|
michalkalkowski/axisym-safe-python
|
7456d32b1a5ac7ca3881ade9b597bfef18acdc3e
|
[
"MIT"
] | 4
|
2019-07-03T07:31:18.000Z
|
2021-08-30T17:13:06.000Z
|
axisafe/elements.py
|
michalkalkowski/axisym-safe-python
|
7456d32b1a5ac7ca3881ade9b597bfef18acdc3e
|
[
"MIT"
] | null | null | null |
axisafe/elements.py
|
michalkalkowski/axisym-safe-python
|
7456d32b1a5ac7ca3881ade9b597bfef18acdc3e
|
[
"MIT"
] | 2
|
2019-09-23T09:10:22.000Z
|
2022-01-05T01:12:30.000Z
|
"""
==============================================================================
Copyright (C) 2016--2017 Michal Kalkowski (MIT License)
kalkowski.m@gmail.com
This is a part of the axisafe package developed for simulating elastic
wave propagation in buried/submerged fluid-filled waveguides. The package is
based on the publication:
Kalkowski MK et al. Axisymmetric semi-analytical finite elements for modelling
waves in buried/submerged fluid-filled waveguides. Comput Struct (2017),
https://doi.org/10.1016/j.compstruc.2017.10.004
This file contains element definitions.
==============================================================================
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splrep, splev
from . import shape_functions as shape_fun
def gamma_PML(x, gamma, PML_start, PML_thk):
    """
    Polynomial stretching profile for a perfectly matched layer.
    Parameters:
        x : physical coordinate
        gamma : average value of the profile
        PML_start : where the PML starts
        PML_thk : thickness of the PML
    Returns:
        the value of the profile function at x
    """
    # normalised depth into the PML (0 at the interface, 1 at the outer edge)
    depth = abs(x - PML_start) / PML_thk
    # quadratic profile whose average over the PML equals gamma
    return 1 + 3 * (gamma - 1) * depth ** 2
def gamma_PML_exp(x, gamma, PML_start, PML_thk):
    """
    Exponential stretching profile for a perfectly matched layer.

    (The docstring previously said "Polynomial", copy-pasted from
    gamma_PML; this profile is exponential.)

    Parameters:
        x : physical coordinate
        gamma : profile parameters in the form a + 1j*b
        PML_start : where the PML starts
        PML_thk : thickness of the PML
    Returns:
        the value of the profile function at x
    """
    # normalised depth into the PML (0 at the interface, 1 at the outer edge)
    local_x = (x - PML_start)/PML_thk
    # real part stretches the coordinate; imaginary part adds attenuation
    # (offset by -1j*(...) so the profile equals 1 at the interface)
    return np.exp(gamma.real*local_x) - 1j*(np.exp(gamma.imag*local_x)- 1)
class SLAX6(object):
    """
    Class for an axisymmetric 1D line element with nodes wrt GLL quadrature
    and higher order Lagrange interpolating polynomials
    u = [ur utheta uz]
    strain directions = [rr tt zz tz rz rt]
    n is the circumferential order number
    Here six matrices are assembled and the circumferential order is applied
    at a later stage
    """
    def __init__(self, nodes_at, n):
        """
        Initialise the element with node locations.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
        """
        self.n = n
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        # three displacement dofs (ur, utheta, uz) per node
        self.no_of_dofs = sum([3]*self.nodes_per_el)
        # define basic properties of the element (no of nodes, etc.)
        self.dofs_per_node = [3]*self.nodes_per_el
        self.dofs_domain = [['s']*3]*self.nodes_per_el  # 's' = solid domain
        self.dofs_per_el = sum(self.dofs_per_node)
        # define T transformation component
        self.T_components = [[1, 1j, 1j]]*self.nodes_per_el
        # material data - filled in by add_properties()
        self.C = None
        self.loss_factor = None
        self.rho = None
        # Pre-allocate the arrays (accumulated in calculate_matrices)
        self.K_Fz = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_Ft = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_F = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_F1 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_F2 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_F3 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_1 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_2 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_3 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_4 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_5 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_6 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.M = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex64')
        # solid-fluid coupling vectors at the inner/outer surface nodes
        self.Hs0 = None
        self.Hs1 = None

    def add_properties(self, list_of_props):
        """Assign mechanical properties and the circumferential order.
        Parameters:
            list_of_props : [lame_1, lame_2, density, loss_factor]
        """
        # assign the properties
        self.loss_factor = list_of_props[3]
        lame_1 = list_of_props[0]
        lame_2 = list_of_props[1]
        # isotropic elasticity matrix with hysteretic damping
        self.C = (1 + self.loss_factor*1.0j)*\
            np.array([(lame_1 + 2*lame_2, lame_1, lame_1, 0, 0, 0),
                      (lame_1, lame_1 + 2*lame_2, lame_1, 0, 0, 0),
                      (lame_1, lame_1, lame_1 + 2*lame_2, 0, 0, 0),
                      (0, 0, 0, lame_2, 0, 0),
                      (0, 0, 0, 0, lame_2, 0),
                      (0, 0, 0, 0, 0, lame_2)])
        self.rho = list_of_props[2]

    def inspect_shape_fun(self):
        """
        This function computes and plots shape functions in the natural coordinates
        together with their derivatives.
        (not updated for long)
        """
        ksi, _ = shape_fun.build_GLL_quadrature(len(self.nodes_at))
        xi_dense = np.linspace(-1, 1, 100)
        N, dN = shape_fun.lagrange_poly(xi_dense, len(ksi))
        _, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
        ax1_limits = 1.2*np.min(N), 1.2*np.max(N)
        ax2_limits = 1.2*np.min(dN), 1.2*np.max(dN)
        ax2.set_xlabel(r'$\xi$')
        # NOTE(review): the two calls below also use set_xlabel, which
        # overwrites the $\xi$ label on ax2 - set_ylabel was probably meant.
        ax1.set_xlabel(r'shape functions')
        ax2.set_xlabel(r'shape functions derivatives')
        # mark node locations with dashed vertical lines
        for this_xi in ksi:
            ax1.plot(2*[this_xi], list(ax1_limits), '--', c='red')
            ax2.plot(2*[this_xi], list(ax2_limits), '--', c='red')
        ax1.plot(xi_dense, N.T)
        ax2.plot(xi_dense, dN.T)
        ax1.set_title(r'Spectral element of order ' + \
                      str(len(self.nodes_at) - 1) + ' with ' +\
                      str(len(self.nodes_at)) + ' nodes.')

    def calculate_matrices(self):
        """Calculates the shape function and B matrices including
        Jacobian*Gauss Weight* det(Jacobian) at all Gauss quadrature points"""
        # get the nodal locations weights and shape functions for a given GLL quadrature
        ksis, weights, NN, NN_r, JJ = \
            shape_fun.line_spectral_GLL_lagrange(self.nodes_at)
        # extract the matrices and sum at each integration point
        for i in range(len(ksis)):
            # shape function values at current location are NN[:, i]
            # generate the shape function matrix in standard form
            # (each scalar shape function replicated on a 3x3 diagonal)
            diagonals = [[funct]*3 for funct in NN[:, i]]
            N = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
            diagonals_r = [[funct]*3 for funct in NN_r[:, i]]
            N_r = np.column_stack([np.diag(diagonal) for diagonal in diagonals_r])
            # local radius in physical coordinates
            r_i = self.nodes_at.dot(NN[:, i])
            # differential operator matrices (strain = L * u contributions)
            L_z = np.zeros([6, 3])
            L_theta = np.zeros([6, 3])
            L_r = np.zeros([6, 3])
            L = np.zeros([6, 3])
            L_z[2, 2], L_z[3, 1], L_z[4, 0] = 1, 1, 1
            L_r[0, 0], L_r[4, 2], L_r[5, 1] = 1, 1, 1
            L[1, 0], L[5, 1] = 1.0, -1.0
            L_theta[1, 1], L_theta[3, 2], L_theta[5, 0] = 1, 1, 1
            # strain-displacement matrices for the r, theta and z directions
            B_1 = L.dot(N)/r_i + L_r.dot(N_r)
            B_2 = L_theta.dot(N)/r_i
            B_3 = L_z.dot(N)
            # elementary stiffness blocks (B^T C B combinations)
            K_F1 = B_1.T.dot(self.C).dot(B_3)
            K_F2 = B_1.T.dot(self.C).dot(B_2)
            K_F3 = B_2.T.dot(self.C).dot(B_3)
            K_1 = B_1.T.dot(self.C).dot(B_1)
            K_2 = K_F2.T - K_F2
            K_3 = K_F1.T - K_F1
            K_4 = K_F3.T + K_F3
            K_5 = B_2.T.dot(self.C).dot(B_2)
            K_6 = B_3.T.dot(self.C).dot(B_3)
            K_F = K_F1.T
            K_Ft = K_F3.T
            K_Fz = K_6
            M = self.rho*N.T.dot(N)
            # perform the integration (2*pi*r from the axisymmetric volume element)
            self.K_F += 2*np.pi*weights[i]*K_F*JJ[i]*r_i
            self.K_Ft += 2*np.pi*weights[i]*K_Ft*JJ[i]*r_i
            self.K_Fz += 2*np.pi*weights[i]*K_Fz*JJ[i]*r_i
            self.K_F1 += 2*np.pi*weights[i]*K_F1*JJ[i]*r_i
            self.K_F2 += 2*np.pi*weights[i]*K_F2*JJ[i]*r_i
            self.K_F3 += 2*np.pi*weights[i]*K_F3*JJ[i]*r_i
            self.K_1 += 2*np.pi*weights[i]*K_1*JJ[i]*r_i
            self.K_2 += 2*np.pi*weights[i]*K_2*JJ[i]*r_i
            self.K_3 += 2*np.pi*weights[i]*K_3*JJ[i]*r_i
            self.K_4 += 2*np.pi*weights[i]*K_4*JJ[i]*r_i
            self.K_5 += 2*np.pi*weights[i]*K_5*JJ[i]*r_i
            self.K_6 += 2*np.pi*weights[i]*K_6*JJ[i]*r_i
            self.M += 2*np.pi*weights[i]*M*JJ[i]*r_i
        # if the element is coupled to fluid elements - create the ingredients
        # of the coupling matrices (outward normal is radial)
        normal = np.array([1, 0, 0]).reshape(-1, 1)
        # inner surface (first node)
        diagonals = [[funct]*3 for funct in NN[:, 0]]
        N_u = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
        self.Hs0 = 2*np.pi*N_u.T.dot(normal)
        # outer surface (last node), scaled by the outer radius
        diagonals = [[funct]*3 for funct in NN[:, -1]]
        N_u = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
        self.Hs1 = 2*np.pi*N_u.T.dot(normal)*self.nodes_at[-1]
class ALAX6(object):
    """
    Class for an acoustic axisymmetric 1D line element with nodes wrt GLL quadrature
    and higher order Lagrange interpolating polynomials
    Each node has one degree of freedom - velocity potential
    n is the circumferential order number"""
    def __init__(self, nodes_at, n):
        """
        Initialise the element with node locations.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
        """
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        # one dof (velocity potential) per node
        self.no_of_dofs = self.nodes_per_el
        # define basic properties of the element
        self.dofs_per_node = [1]*self.nodes_per_el
        self.dofs_domain = [['f']]*self.nodes_per_el  # 'f' = fluid domain
        self.dofs_per_el = sum(self.dofs_per_node)
        # define T-transformation components
        self.T_components = [[1]]*self.nodes_per_el
        # material data - filled in by add_properties()
        self.c_p = None
        self.rho = None
        self.n = n
        # Pre-allocate the arrays (accumulated in calculate_matrices)
        self.K_Fz = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_Ft = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_F = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex')
        self.K_1 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz*dz
        self.K_2 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dz
        self.K_3 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz
        self.K_4 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dtheta
        self.K_5 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta
        self.K_6 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # -
        self.M = np.zeros([self.no_of_dofs, self.no_of_dofs],
                          dtype='float64')
        # fluid-solid coupling vectors at the inner/outer surface nodes
        self.Hf0 = None
        self.Hf1 = None

    def add_properties(self, list_of_props):
        """Assign mechanical properties.
        Parameters:
            list_of_props : [bulk modulus, density]
        """
        # assign properties; c_p is the fluid's compressional wave speed
        self.c_p = (list_of_props[0]/list_of_props[1])**0.5
        self.rho = list_of_props[1]

    def calculate_matrices(self):
        """Calculates the shape function and b matrices including
        Jacobian*Gauss Weight* det(Jacobian) at all Gauss quadrature points"""
        # get the nodal locations weights and shape functions for a given GLL quadrature
        ksis, weights, NN, NN_r, JJ = \
            shape_fun.line_spectral_GLL_lagrange(self.nodes_at)
        # extract the matrices and sum at each integration point
        for i in range(len(ksis)):
            # shape function values at current location are NN[:, i]
            # generate the shape function matrix in standard form (row vector)
            N = NN[:, i].reshape(1, -1)
            N_r = NN_r[:, i].reshape(1, -1)
            # local radius in physical coordinates
            r_i = self.nodes_at.dot(NN[:, i])
            K_1 = N_r.T.dot(N_r)
            K_5 = 1/r_i**2*N.T.dot(N)
            K_6 = N.T.dot(N)
            M = N.T.dot(N)/self.c_p**2
            # perform the integration; NOTE(review): the -rho scaling is
            # presumably chosen to symmetrise the coupled solid/fluid
            # system - confirm against the assembly code.
            self.K_6 += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*r_i
            self.K_1 += -self.rho*2*np.pi*weights[i]*K_1*JJ[i]*r_i
            self.K_5 += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*r_i
            self.K_Fz += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*r_i
            self.K_Ft += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*r_i
            self.M += -self.rho*2*np.pi*weights[i]*M*JJ[i]*r_i
        # if the element is coupled to solid elements - create the ingredients
        # of the coupling matrices
        # inner surface (first node)
        N_phi = NN[:, 0].reshape(-1, 1)
        self.Hf0 = self.rho*N_phi
        # outer surface (last node), scaled by the outer radius
        N_phi = NN[:, -1].reshape(-1, 1)
        self.Hf1 = self.rho*N_phi*self.nodes_at[-1]
class SLAX6_core(object):
    """
    Class for an axisymmetric core (with a node at the axis of symmetry)
    1D line element with nodes wrt GLJ quadrature and higher order Lagrange
    interpolating polynomials
    u = [ur utheta uz]
    strain directions = [rr tt zz tz rz rt]
    n is the circumferential order number
    Here six matrices are assembled and the circumferential order is applied
    at a later stage"""
    def __init__(self, nodes_at, n):
        """
        Initialise the element with node locations.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
        """
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        # double-check if there is a node at the axis of symmetry
        # specify basic element characteristics given the circumferential order
        # (the number of admissible dofs at the axis depends on n)
        if 0 in self.nodes_at:
            if n == 0:
                # axisymmetric motion: only uz survives at the axis
                self.dofs_per_node = [1] + [3]*(self.nodes_per_el - 1)
                self.dofs_domain = [['s']*1] + [['s']*3]*(self.nodes_per_el - 1)
                self.dofs_per_el = sum(self.dofs_per_node)
                self.T_components = [[1j]] + [[1, 1j, 1j]]*(self.nodes_per_el - 1)
            elif n == 1:
                # order 1: two dofs remain at the axis
                self.dofs_per_node = [2] + [3]*(self.nodes_per_el - 1)
                self.dofs_domain = [['s']*2] + [['s']*3]*(self.nodes_per_el - 1)
                self.dofs_per_el = sum(self.dofs_per_node)
                self.T_components = [[1, 1j]] + [[1, 1j, 1j]]*(self.nodes_per_el - 1)
            elif n > 1:
                # higher orders: all displacement vanishes at the axis
                self.dofs_per_node = [0] + [3]*(self.nodes_per_el - 1)
                self.dofs_domain = [[]] + [['s']*3]*(self.nodes_per_el - 1)
                self.dofs_per_el = sum(self.dofs_per_node)
                self.T_components = [[]] + [[1, 1j, 1j]]*(self.nodes_per_el - 1)
        else:
            self.dofs_per_node = [3]*self.nodes_per_el
            self.dofs_domain = [['s']*3]*self.nodes_per_el
            self.dofs_per_el = sum(self.dofs_per_node)
            self.T_components = [[1, 1j, 1j]]*self.nodes_per_el
        self.n = n
        # material data - filled in by add_properties()
        self.C = None
        self.loss_factor = None
        self.rho = None
        # preallocate the arrays
        # use full size - respective dofs will be removed at a later stage
        no_of_dofs = sum([3]*self.nodes_per_el)
        # keep the attribute no of dofs not set (assigned in calculate_matrices)
        self.no_of_dofs = None
        self.K_Fz = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_Ft = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_F = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_F1 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_F2 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_F3 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_1 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_2 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_3 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_4 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_5 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.K_6 = np.zeros([no_of_dofs, no_of_dofs], dtype='complex')
        self.M = np.zeros([no_of_dofs, no_of_dofs], dtype='complex64')
        # coupling vector at the outer surface node (no inner one: core element)
        self.Hs1 = None

    def add_properties(self, list_of_props):
        """Assign mechanical properties and the circumferential order.
        Parameters:
            list_of_props : [lame_1, lame_2, density, loss_factor]
        """
        # assign the properties
        self.loss_factor = list_of_props[3]
        lame_1 = list_of_props[0]
        lame_2 = list_of_props[1]
        # isotropic elasticity matrix with hysteretic damping
        self.C = (1 + self.loss_factor*1.0j)*\
            np.array([(lame_1 + 2*lame_2, lame_1, lame_1, 0, 0, 0),
                      (lame_1, lame_1 + 2*lame_2, lame_1, 0, 0, 0),
                      (lame_1, lame_1, lame_1 + 2*lame_2, 0, 0, 0),
                      (0, 0, 0, lame_2, 0, 0),
                      (0, 0, 0, 0, lame_2, 0),
                      (0, 0, 0, 0, 0, lame_2)])
        self.rho = list_of_props[2]

    def inspect_shape_fun(self):
        """
        This function computes and plots shape functions in the natural coordinates
        together with their derivatives.
        (not updated for long)
        """
        ksi, _ = shape_fun.build_GLJ_quadrature(len(self.nodes_at))
        xi_dense = np.linspace(-1, 1, 100)
        N, _ = shape_fun.lagrange_GLJ(xi_dense, len(ksi))
        # derivatives obtained numerically via spline interpolation
        dN = np.column_stack([splev(xi_dense, splrep(xi_dense, N[:, i]), der=1) \
                              for i in range(N.shape[1])])
        _, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
        ax1_limits = 1.2*np.min(N), 1.2*np.max(N)
        ax2_limits = 1.2*np.min(dN), 1.2*np.max(dN)
        ax2.set_xlabel(r'$\xi$')
        # NOTE(review): the two calls below also use set_xlabel, which
        # overwrites the $\xi$ label on ax2 - set_ylabel was probably meant.
        ax1.set_xlabel(r'shape functions')
        ax2.set_xlabel(r'shape functions derivatives')
        # mark node locations with dashed vertical lines
        for this_xi in ksi:
            ax1.plot(2*[this_xi], list(ax1_limits), '--', c='red')
            ax2.plot(2*[this_xi], list(ax2_limits), '--', c='red')
        ax1.plot(xi_dense, N)
        ax2.plot(xi_dense, dN)
        ax1.set_title(r'Spectral element of order ' + \
                      str(len(self.nodes_at) - 1) + ' with ' +\
                      str(len(self.nodes_at)) + ' nodes.')

    def calculate_matrices(self):
        """Calculates the shape function and B matrices including
        Jacobian*Gauss Weight* det(Jacobian) at all Gauss quadrature points"""
        # get the nodal locations weights and shape functions for a given GLJ quadrature
        ksis, weights, NN, NN_r, JJ = \
            shape_fun.line_spectral_GLJ_lagrange(self.nodes_at)
        # extract the matrices and sum at each integration point
        for i, ksi in enumerate(list(ksis)):
            # shape function values at current location are NN[:, i]
            # generate the shape function matrix in standard form
            # (each scalar shape function replicated on a 3x3 diagonal)
            diagonals = [[funct]*3 for funct in NN[:, i]]
            N = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
            diagonals_r = [[funct]*3 for funct in NN_r[:, i]]
            N_r = np.column_stack([np.diag(diagonal) for diagonal in diagonals_r])
            # local r in physical coordinates
            r_i = self.nodes_at[i]
            # scale the radius according to the de'l'Hospital's rule for the axis
            # singularity
            w = 1 + ksi
            if np.round(r_i, 7) == 0:
                rat = JJ[i]
            else:
                rat = r_i/w
            # differential operator matrices (strain = L * u contributions)
            L_z = np.zeros([6, 3])
            L_theta = np.zeros([6, 3])
            L_r = np.zeros([6, 3])
            L = np.zeros([6, 3])
            L_z[2, 2], L_z[3, 1], L_z[4, 0] = 1, 1, 1
            L_r[0, 0], L_r[4, 2], L_r[5, 1] = 1, 1, 1
            L[1, 0], L[5, 1] = 1.0, -1.0
            L_theta[1, 1], L_theta[3, 2], L_theta[5, 0] = 1, 1, 1
            # use different forms for the axis of symmetry and the rest of the domain
            if np.round(r_i, 7) == 0:
                # at the axis the 1/r terms are replaced by radial derivatives
                B_1 = L.dot(N_r) + L_r.dot(N_r)
                B_2 = L_theta.dot(N_r)
                B_3 = L_z.dot(N)
            else:
                B_1 = L.dot(N)/r_i + L_r.dot(N_r)
                B_2 = L_theta.dot(N)/r_i
                B_3 = L_z.dot(N)
            # elementary stiffness blocks (B^T C B combinations)
            K_F1 = B_1.T.dot(self.C).dot(B_3)
            K_F2 = B_1.T.dot(self.C).dot(B_2)
            K_F3 = B_2.T.dot(self.C).dot(B_3)
            K_1 = B_1.T.dot(self.C).dot(B_1)
            K_2 = K_F2.T - K_F2
            K_3 = K_F1.T - K_F1
            K_4 = K_F3.T + K_F3
            K_5 = B_2.T.dot(self.C).dot(B_2)
            K_6 = B_3.T.dot(self.C).dot(B_3)
            K_F = K_F1.T
            K_Ft = K_F3.T
            K_Fz = K_6
            M = self.rho*N.T.dot(N)
            # GLJ integration (rat replaces r to handle the axis singularity)
            self.K_F += 2*np.pi*weights[i]*K_F*JJ[i]*rat
            self.K_Ft += 2*np.pi*weights[i]*K_Ft*JJ[i]*rat
            self.K_Fz += 2*np.pi*weights[i]*K_Fz*JJ[i]*rat
            self.K_F1 += 2*np.pi*weights[i]*K_F1*JJ[i]*rat
            self.K_F2 += 2*np.pi*weights[i]*K_F2*JJ[i]*rat
            self.K_F3 += 2*np.pi*weights[i]*K_F3*JJ[i]*rat
            self.K_1 += 2*np.pi*weights[i]*K_1*JJ[i]*rat
            self.K_2 += 2*np.pi*weights[i]*K_2*JJ[i]*rat
            self.K_3 += 2*np.pi*weights[i]*K_3*JJ[i]*rat
            self.K_4 += 2*np.pi*weights[i]*K_4*JJ[i]*rat
            self.K_5 += 2*np.pi*weights[i]*K_5*JJ[i]*rat
            self.K_6 += 2*np.pi*weights[i]*K_6*JJ[i]*rat
            self.M += 2*np.pi*weights[i]*M*JJ[i]*rat
        # if the element is coupled to fluid elements - create the ingredients
        # of the coupling matrices (outward normal is radial; outer node only)
        normal = np.array([1, 0, 0]).reshape(-1, 1)
        diagonals = [[funct]*3 for funct in NN[:, -1]]
        N_u = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
        self.Hs1 = 2*np.pi*N_u.T.dot(normal)*self.nodes_at[-1]
        # apply the boundary conditions at the axis of symmetry by deleting
        # the rows/columns of the suppressed axis dofs (see __init__)
        if 0 in self.nodes_at:
            if self.n == 0:
                # keep only uz at the axis: drop dofs 0 (ur) and 1 (utheta)
                self.K_1 = np.delete(np.delete(self.K_1, [0, 1], 0), [0, 1], 1)
                self.K_2 = np.delete(np.delete(self.K_2, [0, 1], 0), [0, 1], 1)
                self.K_3 = np.delete(np.delete(self.K_3, [0, 1], 0), [0, 1], 1)
                self.K_4 = np.delete(np.delete(self.K_4, [0, 1], 0), [0, 1], 1)
                self.K_5 = np.delete(np.delete(self.K_5, [0, 1], 0), [0, 1], 1)
                self.K_6 = np.delete(np.delete(self.K_6, [0, 1], 0), [0, 1], 1)
                self.M = np.delete(np.delete(self.M, [0, 1], 0), [0, 1], 1)
                self.K_F = np.delete(np.delete(self.K_F, [0, 1], 0), [0, 1], 1)
                self.K_Ft = np.delete(np.delete(self.K_Ft, [0, 1], 0), [0, 1], 1)
                self.K_Fz = np.delete(np.delete(self.K_Fz, [0, 1], 0), [0, 1], 1)
                self.Hs1 = 2*np.pi*N_u[:, 2:].T.dot(normal)*self.nodes_at[-1]
            elif self.n == 1:
                # drop dof 2 (uz) at the axis
                self.K_1 = np.delete(np.delete(self.K_1, [2], 0), [2], 1)
                self.K_2 = np.delete(np.delete(self.K_2, [2], 0), [2], 1)
                self.K_3 = np.delete(np.delete(self.K_3, [2], 0), [2], 1)
                self.K_4 = np.delete(np.delete(self.K_4, [2], 0), [2], 1)
                self.K_5 = np.delete(np.delete(self.K_5, [2], 0), [2], 1)
                self.K_6 = np.delete(np.delete(self.K_6, [2], 0), [2], 1)
                self.M = np.delete(np.delete(self.M, [2], 0), [2], 1)
                self.K_F = np.delete(np.delete(self.K_F, [2], 0), [2], 1)
                self.K_Ft = np.delete(np.delete(self.K_Ft, [2], 0), [2], 1)
                self.K_Fz = np.delete(np.delete(self.K_Fz, [2], 0), [2], 1)
                self.Hs1 = 2*np.pi*np.delete(N_u, [2], 1).T.dot(normal)*self.nodes_at[-1]
            elif self.n > 1:
                # drop all three dofs at the axis
                self.K_1 = np.delete(np.delete(self.K_1, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_2 = np.delete(np.delete(self.K_2, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_3 = np.delete(np.delete(self.K_3, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_4 = np.delete(np.delete(self.K_4, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_5 = np.delete(np.delete(self.K_5, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_6 = np.delete(np.delete(self.K_6, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.M = np.delete(np.delete(self.M, [0, 1, 2], 0),
                                   [0, 1, 2], 1)
                self.K_F = np.delete(np.delete(self.K_F, [0, 1, 2], 0),
                                     [0, 1, 2], 1)
                self.K_Ft = np.delete(np.delete(self.K_Ft, [0, 1, 2], 0),
                                      [0, 1, 2], 1)
                self.K_Fz = np.delete(np.delete(self.K_Fz, [0, 1, 2], 0),
                                      [0, 1, 2], 1)
                self.Hs1 = 2*np.pi*N_u[:, 3:].T.dot(normal)*self.nodes_at[-1]
        # final dof count after the axis condensation
        self.no_of_dofs = self.K_1.shape[0]
class ALAX6_core(object):
"""
Class for an acoustic axisymmetric 1D line core element (with a node at the axis
of symmetry) with nodes wrt GLJ quadrature and higher order Lagrange
interpolating polynomials. Each node has one degree of freedom- velocity potential
n is the circumferential order number"""
    def __init__(self, nodes_at, n):
        """
        Initialise the element with node locations.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
        """
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        # one dof (velocity potential) per node
        self.no_of_dofs = self.nodes_per_el
        self.n = n
        # double-check if there is a node at the axis of symmetry
        # define basic properties of the element and T-transformation
        if 0 in self.nodes_at and n != 0:
            # the axis node carries no dof for non-zero circumferential order
            self.dofs_per_node = [0] + [1]*(self.nodes_per_el - 1)
            self.dofs_domain = [[]] + [['f']]*(self.nodes_per_el - 1)
            self.dofs_per_el = sum(self.dofs_per_node)
            self.T_components = [[]] + [[1]]*(self.nodes_per_el - 1)
        else:
            self.dofs_per_node = [1]*self.nodes_per_el
            self.dofs_domain = [['f']]*self.nodes_per_el  # 'f' = fluid domain
            self.dofs_per_el = sum(self.dofs_per_node)
            self.T_components = [[1]]*self.nodes_per_el
        # material data - filled in by add_properties()
        self.c_p = None
        self.rho = None
        # preallocate the arrays (accumulated in calculate_matrices)
        self.K_Fz = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_Ft = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_F = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex')
        self.K_1 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz*dz
        self.K_2 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dz
        self.K_3 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz
        self.K_4 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dtheta
        self.K_5 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta
        self.K_6 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # -
        self.M = np.zeros([self.no_of_dofs, self.no_of_dofs],
                          dtype='float64')
        # coupling vector at the outer surface node (no inner one: core element)
        self.Hf1 = None
def add_properties(self, list_of_props):
"""Assign mechanical properties.
Parameters:
list_of_props : [bulk modulus, density]
"""
# assign properties
self.c_p = (list_of_props[0]/list_of_props[1])**0.5
self.rho = list_of_props[1]
def inspect_shape_fun(self):
"""
This function computes and plots shape functions in the natural coordinates
together with their derivatives
(not updated for long)
"""
ksi, _ = shape_fun.build_GLJ_quadrature(len(self.nodes_at))
xi_dense = np.linspace(-1, 1, 100)
N, _ = shape_fun.lagrange_GLJ(xi_dense, len(ksi))
dN = np.column_stack([splev(xi_dense, splrep(xi_dense, N[:, i]), der=1) \
for i in range(N.shape[1])])
_, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1_limits = 1.2*np.min(N), 1.2*np.max(N)
ax2_limits = 1.2*np.min(dN), 1.2*np.max(dN)
ax2.set_xlabel(r'$\xi$')
ax1.set_xlabel(r'shape functions')
ax2.set_xlabel(r'shape functions derivatives')
for this_xi in ksi:
ax1.plot(2*[this_xi], list(ax1_limits), '--', c='red')
ax2.plot(2*[this_xi], list(ax2_limits), '--', c='red')
ax1.plot(xi_dense, N)
ax2.plot(xi_dense, dN)
ax1.set_title(r'Spectral element of order ' + \
str(len(self.nodes_at) - 1) + ' with ' +\
str(len(self.nodes_at)) + ' nodes.')
def calculate_matrices(self):
"""Calculates the shape function and b matrices including
Jacobian*Gauss Weight* det(Jacobian) at all Gauss quadrature points"""
# get the nodal locations weights and shape functions for a given GLJ quadrature
ksis, weights, NN, NN_r, JJ = \
shape_fun.line_spectral_GLJ_lagrange(self.nodes_at)
# extract the matrices and sum at each integration point
for i, ksi in enumerate(list(ksis)):
# shape funcion values at current location are NN[:, i]
# generate the shape function matrix in standard form
N = NN[:, i].reshape(1, -1)
N_r = NN_r[:, i].reshape(1, -1)
# local r in physcial coordinates
r_i = self.nodes_at.dot(NN[:, i])
# scale the radius according to the de'l'Hospital's rule for the axis
# singularity
w = 1 + ksi
if np.round(r_i, 7) == 0:
rat = JJ[i]
else:
rat = r_i/w
# use different forms for the axis of symmetri and the rest of the domain
if np.round(r_i, 7) == 0:
K_5 = N_r.T.dot(N_r)
else:
K_5 = 1/r_i**2*N.T.dot(N)
K_1 = N_r.T.dot(N_r)
K_6 = N.T.dot(N)
M = N.T.dot(N)/self.c_p**2
# GLJ integration
self.K_1 += -self.rho*2*np.pi*weights[i]*K_1*JJ[i]*rat
self.K_5 += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*rat
self.K_6 += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*rat
self.K_Fz += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*rat
self.K_Ft += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*rat
self.M += -self.rho*2*np.pi*weights[i]*M*JJ[i]*rat
# if the element is coupled to solid elements - create the ingredients
# of the coupling matrices
N_phi = NN[:, -1].reshape(-1, 1)
self.Hf1 = self.rho*N_phi*self.nodes_at[-1]
# apply the boundary conditions at the axis of symmetry
if (0 in self.nodes_at) & (self.n != 0):
self.K_1 = np.delete(np.delete(self.K_1, [0], 0), [0], 1)
self.K_5 = np.delete(np.delete(self.K_5, [0], 0), [0], 1)
self.K_6 = np.delete(np.delete(self.K_6, [0], 0), [0], 1)
self.M = np.delete(np.delete(self.M, [0], 0), [0], 1)
self.K_F = np.delete(np.delete(self.K_F, [0], 0), [0], 1)
self.K_Ft = np.delete(np.delete(self.K_Ft, [0], 0), [0], 1)
self.K_Fz = np.delete(np.delete(self.K_Fz, [0], 0), [0], 1)
class SLAX6_PML(object):
    """
    Class for an axisymmetric PML 1D line element with nodes wrt GLL quadrature
    and higher order Lagrange interpolating polynomials.
    u = [ur utheta uz]
    strain directions = [rr tt zz tz rz rt]
    n is the circumferential order number
    """
    def __init__(self, nodes_at, n, PML_params, PML_function=gamma_PML_exp):
        """
        Initialise the element with node locations and PML parameters.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
            PML_params : parameters of the PML as a list [start, thickness, a + 1j*b]
            PML_function : function defining the PML profile (exponential by default)
        """
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        self.no_of_dofs = 3*self.nodes_per_el
        # specify basic element characteristics and T-transformation
        self.dofs_per_node = [3]*self.nodes_per_el
        self.dofs_domain = [['s']*3]*self.nodes_per_el
        self.dofs_per_el = sum(self.dofs_per_node)
        self.T_components = [[1, 1j, 1j]]*self.nodes_per_el
        self.n = n
        self.C = None
        self.loss_factor = None
        self.rho = None
        # preallocate the arrays
        self.K_Fz = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_Ft = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_F = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_1 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_2 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_3 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_4 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_5 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.K_6 = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.M = np.zeros([self.no_of_dofs, self.no_of_dofs], dtype='complex')
        self.Hs0 = None
        self.PML_start = PML_params[0]
        self.PML_thk = PML_params[1]
        self.gamma = PML_params[2]
        self.PML_function = PML_function
    def add_properties(self, list_of_props):
        """Assign mechanical properties.
        Parameters:
            list_of_props : [lame_1, lame_2, density, loss_factor]
        """
        # assign the properties; hysteretic damping enters through the
        # complex stiffness factor (1 + i*loss_factor)
        self.loss_factor = list_of_props[3]
        lame_1 = list_of_props[0]
        lame_2 = list_of_props[1]
        self.C = (1 + self.loss_factor*1.0j)*\
            np.array([(lame_1 + 2*lame_2, lame_1, lame_1, 0, 0, 0),
                      (lame_1, lame_1 + 2*lame_2, lame_1, 0, 0, 0),
                      (lame_1, lame_1, lame_1 + 2*lame_2, 0, 0, 0),
                      (0, 0, 0, lame_2, 0, 0),
                      (0, 0, 0, 0, lame_2, 0),
                      (0, 0, 0, 0, 0, lame_2)])
        self.rho = list_of_props[2]
    def calculate_matrices(self):
        """Calculates the shape function and b matrices including
        Jacobian*Gauss Weight*det(Jacobian) at all Gauss quadrature points.
        Raises:
            ValueError : if the assigned PML profile function is not supported
        """
        # get the nodal locations, weights and shape functions for a given GLL quadrature
        ksis, weights, NN, NN_r, JJ = \
                shape_fun.line_spectral_GLL_lagrange(self.nodes_at)
        # differential operator matrices are constant, so they are assembled
        # once, outside the integration loop
        L_z = np.zeros([6, 3])
        L_theta = np.zeros([6, 3])
        L_r = np.zeros([6, 3])
        L = np.zeros([6, 3])
        L_z[2, 2], L_z[3, 1], L_z[4, 0] = 1, 1, 1
        L_r[0, 0], L_r[4, 2], L_r[5, 1] = 1, 1, 1
        L[1, 0], L[5, 1] = 1.0, -1.0
        L_theta[1, 1], L_theta[3, 2], L_theta[5, 0] = 1, 1, 1
        # extract the matrices and sum at each integration point
        for i in range(len(ksis)):
            # shape function values at the current location are NN[:, i];
            # generate the shape function matrix in standard form
            diagonals = [[funct]*3 for funct in NN[:, i]]
            N = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
            diagonals_r = [[funct]*3 for funct in NN_r[:, i]]
            N_r = np.column_stack([np.diag(diagonal) for diagonal in diagonals_r])
            # local r in physical coordinates
            r_i = self.nodes_at.dot(NN[:, i])
            # calculate the stretched (complex) r
            if self.PML_function is gamma_PML:
                r_tilde = -(self.gamma - 1)*(self.PML_start - r_i)**3/self.PML_thk**2 + r_i
            elif self.PML_function is gamma_PML_exp:
                local_r = (r_i - self.PML_start)/self.PML_thk
                r_tilde = self.PML_start + self.PML_thk/self.gamma.real*(
                    np.exp(self.gamma.real*local_r) - 1) - \
                    1j*self.PML_thk/self.gamma.imag*(
                        np.exp(self.gamma.imag*local_r) - 1) + 1j*local_r*self.PML_thk
            else:
                # previously this branch only printed a message, leaving
                # r_tilde undefined and causing a confusing NameError below
                raise ValueError('No such PML function defined')
            # calculate the value of the PML profile at current integration point
            current_gamma = self.PML_function(r_i, self.gamma, \
                                              self.PML_start, self.PML_thk)
            B_1 = L.dot(N)/r_tilde + 1/current_gamma*L_r.dot(N_r)
            B_2 = L_theta.dot(N)/r_tilde
            B_3 = L_z.dot(N)
            K_F1 = B_1.T.dot(self.C).dot(B_3)
            K_F2 = B_1.T.dot(self.C).dot(B_2)
            K_F3 = B_2.T.dot(self.C).dot(B_3)
            K_1 = B_1.T.dot(self.C).dot(B_1)
            K_2 = K_F2.T - K_F2
            K_3 = K_F1.T - K_F1
            K_4 = K_F3.T + K_F3
            K_5 = B_2.T.dot(self.C).dot(B_2)
            K_6 = B_3.T.dot(self.C).dot(B_3)
            K_F = K_F1.T
            K_Ft = K_F3.T
            K_Fz = K_6
            M = self.rho*N.T.dot(N)
            # GLL integration
            self.K_F += 2*np.pi*weights[i]*K_F*JJ[i]*r_tilde*current_gamma
            self.K_Ft += 2*np.pi*weights[i]*K_Ft*JJ[i]*r_tilde*current_gamma
            self.K_Fz += 2*np.pi*weights[i]*K_Fz*JJ[i]*r_tilde*current_gamma
            self.K_1 += 2*np.pi*weights[i]*K_1*JJ[i]*r_tilde*current_gamma
            self.K_2 += 2*np.pi*weights[i]*K_2*JJ[i]*r_tilde*current_gamma
            self.K_3 += 2*np.pi*weights[i]*K_3*JJ[i]*r_tilde*current_gamma
            self.K_4 += 2*np.pi*weights[i]*K_4*JJ[i]*r_tilde*current_gamma
            self.K_5 += 2*np.pi*weights[i]*K_5*JJ[i]*r_tilde*current_gamma
            self.K_6 += 2*np.pi*weights[i]*K_6*JJ[i]*r_tilde*current_gamma
            self.M += 2*np.pi*weights[i]*M*JJ[i]*r_tilde*current_gamma
        # if the element is coupled to fluid elements - create the ingredients
        # of the coupling matrices (evaluated at the first node)
        normal = np.array([1, 0, 0])
        diagonals = [[funct]*3 for funct in NN[:, 0]]
        N_u = np.column_stack([np.diag(diagonal) for diagonal in diagonals])
        self.Hs0 = 2*np.pi*N_u.T.dot(normal.reshape(-1, 1))
class ALAX6_PML(object):
    """
    Class for an acoustic axisymmetric 1D line element with nodes wrt GLL quadrature
    and higher order Lagrange interpolating polynomials belonging to a PML.
    Each node has one degree of freedom - velocity potential.
    n is the circumferential order number
    """
    def __init__(self, nodes_at, n, PML_params, PML_function=gamma_PML_exp):
        """
        Initialise the element with node locations and PML parameters.
        Parameters:
            nodes_at : node locations
            n : circumferential order number
            PML_params : parameters of the PML as a list [start, thickness, a + 1j*b]
            PML_function : function defining the PML profile (exponential by default)
        """
        self.nodes_at = nodes_at
        self.nodes_per_el = len(nodes_at)
        self.no_of_dofs = self.nodes_per_el
        # define basic properties of the element and T-transformation
        self.dofs_per_node = [1]*self.nodes_per_el
        self.dofs_domain = [['f']]*self.nodes_per_el
        self.dofs_per_el = sum(self.dofs_per_node)
        self.T_components = [[1]]*self.nodes_per_el
        self.n = n
        self.c_p = None
        self.rho = None
        # preallocate the arrays
        self.K_Fz = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_Ft = np.zeros([self.no_of_dofs, self.no_of_dofs],
                             dtype='complex')
        self.K_F = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex')
        self.K_1 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz*dz
        self.K_2 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dz
        self.K_3 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dz
        self.K_4 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta*dtheta
        self.K_5 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # dtheta
        self.K_6 = np.zeros([self.no_of_dofs, self.no_of_dofs],
                            dtype='complex') # -
        self.M = np.zeros([self.no_of_dofs, self.no_of_dofs],
                          dtype='complex')
        self.Hf0 = None
        self.PML_start = PML_params[0]
        self.PML_thk = PML_params[1]
        self.gamma = PML_params[2]
        self.PML_function = PML_function
    def add_properties(self, list_of_props):
        """Assign mechanical properties.
        Parameters:
            list_of_props : [bulk modulus, density]
        """
        # assign properties; c_p is the acoustic wave speed sqrt(K/rho)
        self.c_p = (list_of_props[0]/list_of_props[1])**0.5
        self.rho = list_of_props[1]
    def calculate_matrices(self):
        """Calculates the shape function and b matrices including
        Jacobian*Gauss Weight*det(Jacobian) at all Gauss quadrature points.
        Raises:
            ValueError : if the assigned PML profile function is not supported
        """
        # get the nodal locations, weights and shape functions for a given GLL quadrature
        ksis, weights, NN, NN_r, JJ = \
                shape_fun.line_spectral_GLL_lagrange(self.nodes_at)
        # extract the matrices and sum at each integration point
        for i in range(len(ksis)):
            # shape function values at the current location are NN[:, i];
            # generate the shape function matrix in standard form
            N = NN[:, i].reshape(1, -1)
            N_r = NN_r[:, i].reshape(1, -1)
            # local r in physical coordinates
            r_i = self.nodes_at.dot(NN[:, i])
            # calculate the stretched (complex) r according to the chosen PML profile
            if self.PML_function is gamma_PML:
                r_tilde = -(self.gamma - 1)*(self.PML_start - r_i)**3/self.PML_thk**2 + r_i
            elif self.PML_function is gamma_PML_exp:
                local_r = (r_i - self.PML_start)/self.PML_thk
                r_tilde = self.PML_start + self.PML_thk/self.gamma.real*(
                    np.exp(self.gamma.real*local_r) - 1) - \
                    1j*self.PML_thk/self.gamma.imag*(
                        np.exp(self.gamma.imag*local_r) - 1) + 1j*local_r*self.PML_thk
            else:
                # previously this branch only printed a message, leaving
                # r_tilde undefined and causing a confusing NameError below
                raise ValueError('No such PML function defined')
            # calculate the value of the PML profile at the current location
            current_gamma = self.PML_function(r_i, self.gamma, \
                                              self.PML_start, self.PML_thk)
            K_1 = 1/current_gamma**2*N_r.T.dot(N_r)
            K_5 = 1/r_tilde**2*N.T.dot(N)
            K_6 = N.T.dot(N)
            M = N.T.dot(N)/self.c_p**2
            # GLL integration
            self.K_6 += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*r_tilde*current_gamma
            self.K_1 += -self.rho*2*np.pi*weights[i]*K_1*JJ[i]*r_tilde*current_gamma
            self.K_5 += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*r_tilde*current_gamma
            self.K_Fz += -self.rho*2*np.pi*weights[i]*K_6*JJ[i]*r_tilde*current_gamma
            self.K_Ft += -self.rho*2*np.pi*weights[i]*K_5*JJ[i]*r_tilde*current_gamma
            self.M += -self.rho*2*np.pi*weights[i]*M*JJ[i]*r_tilde*current_gamma
        # if the element is coupled to solid elements - create the ingredients
        # of the coupling matrices (evaluated at the first node)
        N_phi = NN[:, 0].reshape(-1, 1)
        self.Hf0 = self.rho*N_phi
| 44.569444
| 94
| 0.552731
| 7,124
| 44,926
| 3.284672
| 0.051376
| 0.037393
| 0.048205
| 0.057949
| 0.946795
| 0.937094
| 0.92735
| 0.920983
| 0.911966
| 0.901197
| 0
| 0.038762
| 0.312625
| 44,926
| 1,007
| 95
| 44.613704
| 0.718986
| 0.215221
| 0
| 0.733756
| 0
| 0
| 0.024429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03645
| false
| 0
| 0.007924
| 0
| 0.057052
| 0.004754
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5fc2bf4beb1a7c9a9aabde70d8cec59e39de63d8
| 85
|
py
|
Python
|
proxypay/references/__init__.py
|
AgeuMatheus/django-proxypay
|
90736875e434013abe3ea1be5f9ff3100c9005db
|
[
"MIT"
] | 12
|
2020-05-06T17:07:26.000Z
|
2020-10-19T15:41:56.000Z
|
proxypay/references/__init__.py
|
txiocoder/django-proxypay
|
90736875e434013abe3ea1be5f9ff3100c9005db
|
[
"MIT"
] | 1
|
2020-05-22T14:24:29.000Z
|
2020-06-07T10:38:10.000Z
|
proxypay/references/__init__.py
|
txiocoder/django-proxypay
|
90736875e434013abe3ea1be5f9ff3100c9005db
|
[
"MIT"
] | null | null | null |
from proxypay.references.create import create
from proxypay.references.get import get
| 42.5
| 45
| 0.870588
| 12
| 85
| 6.166667
| 0.5
| 0.324324
| 0.594595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082353
| 85
| 2
| 46
| 42.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
399c2e0f44bdf00501d9eb64e3613018f618b64b
| 1,207
|
py
|
Python
|
src/test/test_sample.py
|
andrew-oxenburgh/python-bowling
|
9f8ee6aa885a23c6d91f67a65b754defef5c86a4
|
[
"MIT"
] | null | null | null |
src/test/test_sample.py
|
andrew-oxenburgh/python-bowling
|
9f8ee6aa885a23c6d91f67a65b754defef5c86a4
|
[
"MIT"
] | null | null | null |
src/test/test_sample.py
|
andrew-oxenburgh/python-bowling
|
9f8ee6aa885a23c6d91f67a65b754defef5c86a4
|
[
"MIT"
] | null | null | null |
from src import bottles_of_beer
# Expected rhyme when no bottles remain: just the closing verse.
RES_0_BOTTLE = '''
no more bottles of beer on the wall,
no more bottles of beer.
go to the store and buy some more,
0 bottles of beer on the wall.
'''


def test_0_bottle():
    # zero bottles yields only the terminal verse
    assert bottles_of_beer.how_many_bottles(0) == RES_0_BOTTLE
# Expected rhyme starting from one bottle (one verse plus the closing verse).
RES_1_BOTTLE = '''
1 more bottle beer on the wall,
1 more bottle of beer.
1 more bottle of beer.
drink it down pass it around,
1 more bottle of beer
no more bottles of beer on the wall,
no more bottles of beer.
go to the store and buy some more,
1 bottles of beer on the wall.
'''


def test_1_bottle():
    # one bottle: the singular verse followed by the terminal verse
    assert bottles_of_beer.how_many_bottles(1) == RES_1_BOTTLE
# Expected rhyme starting from two bottles (two verses plus the closing verse).
RES_2_BOTTLES = '''
2 more bottles of beer on the wall,
2 more bottles of beer.
drink it down pass it around,
2 more bottles of beer
1 more bottle beer on the wall,
1 more bottle of beer.
drink it down pass it around,
1 more bottle of beer
no more bottles of beer on the wall,
no more bottles of beer.
go to the store and buy some more,
2 bottles of beer on the wall.
'''


def test_2_bottle():
    # two bottles: plural verse, singular verse, then the terminal verse
    assert bottles_of_beer.how_many_bottles(2) == RES_2_BOTTLES
| 24.14
| 48
| 0.732394
| 226
| 1,207
| 3.769912
| 0.150442
| 0.140845
| 0.244131
| 0.179577
| 0.855634
| 0.751174
| 0.751174
| 0.720657
| 0.447183
| 0.447183
| 0
| 0.025105
| 0.207954
| 1,207
| 50
| 49
| 24.14
| 0.866109
| 0
| 0
| 0.55814
| 0
| 0
| 0.596026
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.069767
| false
| 0.069767
| 0.023256
| 0
| 0.093023
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
39b44c165af377c6704b1b11480222b4943c7937
| 7,430
|
py
|
Python
|
tests/argguard_test.py
|
gcascio/yaml-attributes
|
43478cf8f49d5f4c22d9ae229dc3ec8fc9a360d7
|
[
"MIT"
] | null | null | null |
tests/argguard_test.py
|
gcascio/yaml-attributes
|
43478cf8f49d5f4c22d9ae229dc3ec8fc9a360d7
|
[
"MIT"
] | 2
|
2021-09-29T22:48:43.000Z
|
2021-09-29T22:54:41.000Z
|
tests/argguard_test.py
|
gcascio/yaml-attributes
|
43478cf8f49d5f4c22d9ae229dc3ec8fc9a360d7
|
[
"MIT"
] | null | null | null |
from typing import Optional
import pytest
from yamlattributes import YamlAttributes
def test_config_loads_successfully_in_sync_mode():
    """Default (sync) mode populates annotated attributes from the YAML file."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act
    TestConfig.init()

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
def test_config_fails_with_incomplete_yaml_in_sync_mode():
    """Sync mode rejects a YAML file that lacks attributes the class declares."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/incomplete_test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act / Assert
    with pytest.raises(AssertionError, match=r".*\"sync\" mode.*"):
        TestConfig.init()
def test_config_fails_with_overloaded_yaml_in_sync_mode():
    """Sync mode rejects a YAML file that carries attributes the class lacks."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/overloaded_test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act / Assert
    with pytest.raises(AssertionError, match=r".*\"sync\" mode.*"):
        TestConfig.init()
def test_config_loads_successfully_in_soft_config_mode():
    """soft_config mode populates attributes from a fully matching YAML file."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act
    TestConfig.init(mode='soft_config')

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
def test_config_loads_successfully_overloaded_config_in_soft_config_mode():
    """soft_config mode ignores YAML keys that the class does not declare."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/overloaded_test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act
    TestConfig.init(mode='soft_config')

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
    # the extra YAML key must not leak onto the class
    assert not hasattr(TestConfig, 'test_attribute_d'), failure
def test_config_fails_with_incomplete_yaml_in_soft_config_mode():
    """soft_config mode still rejects a YAML file missing declared attributes."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/incomplete_test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act / Assert
    with pytest.raises(AssertionError, match=r".*\"soft_config\" mode.*"):
        TestConfig.init(mode='soft_config')
def test_config_fails_with_type_mismatch():
    """Init fails when an annotation disagrees with the YAML value's type."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange: declare test_attribute_a as int although the YAML holds a str,
    # creating a deliberate type mismatch
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: int
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act / Assert
    with pytest.raises(
        AssertionError,
        match=r".*missmatch.*\"test_attribute_a\" attribute.*",
    ):
        TestConfig.init()
def test_config_loads_successfully_with_union_type():
    """An Optional attribute absent from the YAML is created with a falsy value."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int
        optional_attribute: Optional[int]

    # Act
    TestConfig.init()

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
    assert hasattr(TestConfig, 'optional_attribute'), failure
    assert not TestConfig.optional_attribute, failure
def test_config_successfully_assighns_optional_values():
    """An Optional attribute present in the YAML receives the file's value."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: Optional[int]

    # Act
    TestConfig.init()

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
def test_config_successfully_converts_to_dict():
    """to_dict returns all attributes, including yaml_file_path, as a dict."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    expected = {
        'yaml_file_path': './tests/test_config.yaml',
        'test_attribute_a': 'config_test_value_a',
        'test_attribute_b': 'config_test_value_b',
        'test_attribute_c': 42,
    }

    # Act
    TestConfig.init()

    # Assert
    assert TestConfig.to_dict() == expected, \
        'Config class was not correctly converted to a dict'
def test_config_loads_successfully_when_config_path_is_passed_to_init():
    """The YAML path may be supplied to init() instead of as a class attribute."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange: note the class deliberately omits yaml_file_path
    class TestConfig(YamlAttributes):
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act
    TestConfig.init(yaml_file_path='./tests/test_config.yaml')

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'config_test_value_b', failure
    assert TestConfig.test_attribute_c == 42, failure
def test_config_loads_successfully_when_config_section_is_passed_to_init():
    """init(yaml_section=...) loads values from the named YAML section."""
    YamlAttributes.__abstractmethods__ = set()

    # Arrange
    class TestConfig(YamlAttributes):
        yaml_file_path = './tests/test_config.yaml'
        test_attribute_a: str
        test_attribute_b = 'test_value_b'
        test_attribute_c: int

    # Act
    TestConfig.init(yaml_section='another_config')

    # Assert
    failure = 'Config values are not correctly assigned to config class'
    assert TestConfig.test_attribute_a == 'another_config_test_value_a', failure
    assert TestConfig.test_attribute_b == 'another_config_test_value_b', failure
    assert TestConfig.test_attribute_c == 7, failure
| 28.037736
| 75
| 0.688425
| 876
| 7,430
| 5.391553
| 0.082192
| 0.170654
| 0.107135
| 0.07707
| 0.882278
| 0.863646
| 0.85433
| 0.806479
| 0.806479
| 0.778107
| 0
| 0.002621
| 0.229879
| 7,430
| 264
| 76
| 28.143939
| 0.822789
| 0.044953
| 0
| 0.777778
| 0
| 0
| 0.207058
| 0.058107
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.074074
| false
| 0.012346
| 0.018519
| 0
| 0.462963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39b94a64c7b098faa32d96f42d7c525f54f052e8
| 2,096
|
py
|
Python
|
qa/tasks/tests/test_devstack.py
|
rpratap-bot/ceph
|
9834961a66927ae856935591f2fd51082e2ee484
|
[
"MIT"
] | 4
|
2020-04-08T03:42:02.000Z
|
2020-10-01T20:34:48.000Z
|
qa/tasks/tests/test_devstack.py
|
rpratap-bot/ceph
|
9834961a66927ae856935591f2fd51082e2ee484
|
[
"MIT"
] | 93
|
2020-03-26T14:29:14.000Z
|
2020-11-12T05:54:55.000Z
|
qa/tasks/tests/test_devstack.py
|
rpratap-bot/ceph
|
9834961a66927ae856935591f2fd51082e2ee484
|
[
"MIT"
] | 23
|
2020-03-24T10:28:44.000Z
|
2020-09-24T09:42:19.000Z
|
from textwrap import dedent
from tasks import devstack
class TestDevstack(object):
    """Tests for parsing OpenStack CLI property tables."""

    def test_parse_os_table(self):
        # a verbatim property table as printed by the OpenStack CLI
        raw_table = dedent("""
            +---------------------+--------------------------------------+
            |       Property      |                Value                 |
            +---------------------+--------------------------------------+
            |     attachments     |                  []                  |
            |  availability_zone  |                 nova                 |
            |       bootable      |                false                 |
            |      created_at     |      2014-02-21T17:14:47.548361      |
            | display_description |                 None                 |
            |     display_name    |                 NAME                 |
            |          id         | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e |
            |       metadata      |                  {}                  |
            |         size        |                  1                   |
            |     snapshot_id     |                 None                 |
            |     source_volid    |                 None                 |
            |        status       |               creating               |
            |     volume_type     |                 None                 |
            +---------------------+--------------------------------------+
            """).strip()
        # every row should become a key/value pair, header row included
        want = {
            'Property': 'Value',
            'attachments': '[]',
            'availability_zone': 'nova',
            'bootable': 'false',
            'created_at': '2014-02-21T17:14:47.548361',
            'display_description': 'None',
            'display_name': 'NAME',
            'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e',
            'metadata': '{}',
            'size': '1',
            'snapshot_id': 'None',
            'source_volid': 'None',
            'status': 'creating',
            'volume_type': 'None'}
        assert devstack.parse_os_table(raw_table) == want
| 42.77551
| 74
| 0.3125
| 118
| 2,096
| 5.355932
| 0.483051
| 0.022152
| 0.037975
| 0.113924
| 0.721519
| 0.721519
| 0.721519
| 0.721519
| 0.721519
| 0.721519
| 0
| 0.068695
| 0.513836
| 2,096
| 48
| 75
| 43.666667
| 0.551521
| 0
| 0
| 0.075
| 0
| 0
| 0.734226
| 0.148184
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.025
| false
| 0
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39c180ff82a0772db8834b418bec31a2766cdb59
| 2,124
|
py
|
Python
|
AutoEncoder.py
|
Batman9698/AutoEncoder
|
96e5a3f805ed9884227ef14739dd626900ec24a6
|
[
"MIT"
] | null | null | null |
AutoEncoder.py
|
Batman9698/AutoEncoder
|
96e5a3f805ed9884227ef14739dd626900ec24a6
|
[
"MIT"
] | null | null | null |
AutoEncoder.py
|
Batman9698/AutoEncoder
|
96e5a3f805ed9884227ef14739dd626900ec24a6
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
#AE_v4
class AutoEncoder_v4(nn.Module):
    """Single-layer autoencoder: 784 -> encoding_dim -> 784.

    Tanh bottleneck, sigmoid reconstruction.
    """

    def __init__(self, encoding_dim):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, encoding_dim),
            nn.Tanh(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, 784),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # reconstruct the input from its latent code
        return self.decoder(self.encoder(x))

    def encode(self, x):
        """Map the input to its latent representation."""
        return self.encoder(x)

    def decode(self, x):
        """Map a latent representation back to input space."""
        return self.decoder(x)
# AE_v3
class AutoEncoder_v3(nn.Module):
    """Deep autoencoder: 784 -> 512 -> 256 -> encoding_dim and mirrored back.

    ReLU hidden layers, linear bottleneck, sigmoid reconstruction.
    """

    def __init__(self, encoding_dim):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, encoding_dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # reconstruct the input from its latent code
        return self.decoder(self.encoder(x))

    def encode(self, x):
        """Map the input to its latent representation."""
        return self.encoder(x)

    def decode(self, x):
        """Map a latent representation back to input space."""
        return self.decoder(x)
# AE_v2
class AutoEncoder_v2(nn.Module):
    """Single-layer autoencoder: 784 -> encoding_dim -> 784.

    ReLU bottleneck, sigmoid reconstruction.
    """

    def __init__(self, encoding_dim):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, encoding_dim),
            nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, 784),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # reconstruct the input from its latent code
        return self.decoder(self.encoder(x))

    def encode(self, x):
        """Map the input to its latent representation."""
        return self.encoder(x)

    def decode(self, x):
        """Map a latent representation back to input space."""
        return self.decoder(x)
| 33.1875
| 66
| 0.479284
| 234
| 2,124
| 4.17094
| 0.136752
| 0.081967
| 0.086066
| 0.122951
| 0.893443
| 0.893443
| 0.811475
| 0.811475
| 0.768443
| 0.584016
| 0
| 0.040735
| 0.410546
| 2,124
| 64
| 67
| 33.1875
| 0.738818
| 0.008004
| 0
| 0.716981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.226415
| false
| 0
| 0.018868
| 0.113208
| 0.471698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
39e32cadc9efd78ea28df489a31d74fa9ba6e8f2
| 37,102
|
py
|
Python
|
lux_ai/models.py
|
limash/lux-ai
|
c9f0ccd659e03919b0b80d84b770a9ba506b041a
|
[
"Apache-2.0"
] | null | null | null |
lux_ai/models.py
|
limash/lux-ai
|
c9f0ccd659e03919b0b80d84b770a9ba506b041a
|
[
"Apache-2.0"
] | null | null | null |
lux_ai/models.py
|
limash/lux-ai
|
c9f0ccd659e03919b0b80d84b770a9ba506b041a
|
[
"Apache-2.0"
] | null | null | null |
# move all imports inside functions to use ray.remote multitasking
def actor_critic_residual_six_actions(actions_shape):
import tensorflow as tf
import tensorflow.keras as keras
class ResidualUnit(keras.layers.Layer):
def __init__(self, filters, initializer, activation, **kwargs):
super().__init__(**kwargs)
self._filters = filters
self._activation = activation
self._conv = keras.layers.Conv2D(filters, 3, kernel_initializer=initializer, padding="same", use_bias=False)
self._norm = keras.layers.BatchNormalization()
def call(self, inputs, training=False, **kwargs):
x = self._conv(inputs)
x = self._norm(x, training=training)
return self._activation(inputs + x)
def compute_output_shape(self, batch_input_shape):
batch, x, y, _ = batch_input_shape
return [batch, x, y, self._filters]
class ResidualModel(keras.Model):
def __init__(self, actions_number, **kwargs):
super().__init__(**kwargs)
filters = 200
layers = 10
initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
activation = keras.activations.relu
self._conv = keras.layers.Conv2D(filters, 3, padding="same", kernel_initializer=initializer, use_bias=False)
self._norm = keras.layers.BatchNormalization()
self._activation = keras.layers.ReLU()
self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
# self._depthwise = keras.layers.DepthwiseConv2D(32)
self._depthwise = keras.layers.DepthwiseConv2D(13)
self._flatten = keras.layers.Flatten()
# self._city_tiles_probs0 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)
# self._city_tiles_probs1 = keras.layers.Dense(4, activation="softmax",
# kernel_initializer=initializer_random)
self._workers_probs0 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)
self._workers_probs1 = keras.layers.Dense(actions_number, activation="softmax",
kernel_initializer=initializer_random)
# self._carts_probs0 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)
# self._carts_probs1 = keras.layers.Dense(17, activation="softmax", kernel_initializer=initializer_random)
self._baseline = keras.layers.Dense(1, kernel_initializer=initializer_random,
activation=keras.activations.tanh)
def call(self, inputs, training=False, mask=None):
features = inputs
x = features
x = self._conv(x)
x = self._norm(x, training=training)
x = self._activation(x)
for layer in self._residual_block:
x = layer(x, training=training)
shape_x = tf.shape(x)
y = tf.reshape(x, (shape_x[0], -1, shape_x[-1]))
y = tf.reduce_mean(y, axis=1)
z1 = (x * features[:, :, :, :1])
shape_z = tf.shape(z1)
z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
z1 = tf.reduce_sum(z1, axis=1)
z2 = self._depthwise(x)
z2 = self._flatten(z2)
z = tf.concat([z1, z2], axis=1)
# t = self._city_tiles_probs0(z)
# t = self._city_tiles_probs1(t)
w = self._workers_probs0(z)
w = self._workers_probs1(w)
# c = self._carts_probs0(z)
# c = self._carts_probs1(c)
# probs = tf.concat([t, w, c], axis=1)
# probs = probs * actions_mask
probs = w
baseline = self._baseline(tf.concat([y, z], axis=1))
return probs, baseline
def get_config(self):
    # Keras config serialization is not implemented for this model.
    pass
model = ResidualModel(actions_shape)
return model
def actor_critic_sep_residual_six_actions(actions_shape):
    """Build an actor-critic Keras model with *separate* residual towers for
    the actor and the critic, sharing only the root convolution.

    Args:
        actions_shape: size of the discrete action space for the actor's
            softmax head.

    Returns:
        A subclassed ``keras.Model`` whose ``call`` returns
        ``(action_probs, baseline)``.
    """
    import tensorflow as tf
    import tensorflow.keras as keras

    class ResidualUnit(keras.layers.Layer):
        """3x3 same-padding conv + batch norm with an identity skip."""

        def __init__(self, filters, initializer, activation, **kwargs):
            super().__init__(**kwargs)
            self._filters = filters
            self._activation = activation
            self._conv = keras.layers.Conv2D(filters, 3, kernel_initializer=initializer, padding="same", use_bias=False)
            self._norm = keras.layers.BatchNormalization()

        def call(self, inputs, training=False, **kwargs):
            x = self._conv(inputs)
            x = self._norm(x, training=training)
            # Residual connection: activation(input + transformed input).
            return self._activation(inputs + x)

        def compute_output_shape(self, batch_input_shape):
            batch, x, y, _ = batch_input_shape
            return [batch, x, y, self._filters]

    class CriticBranch(keras.layers.Layer):
        """Residual tower pooled three ways and projected to 128 units."""

        def __init__(self, filters, initializer, activation, layers, **kwargs):
            super().__init__(**kwargs)
            self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
            # 13x13 depthwise kernel -- assumes 13x13 spatial feature maps so
            # the conv collapses them to 1x1 per channel; TODO confirm.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._fc_128 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)

        def call(self, inputs, training=False, **kwargs):
            x, center = inputs
            for layer in self._residual_block:
                x = layer(x, training=training)
            # y: global mean over spatial positions.
            shape_x = tf.shape(x)
            y = tf.reshape(x, (shape_x[0], -1, shape_x[-1]))
            y = tf.reduce_mean(y, axis=1)
            # z1: features summed after weighting by the ``center`` plane.
            z1 = (x * center)
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            # z2: learned spatial pooling via the depthwise conv.
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            # Unlike ActorBranch, the critic also concatenates the mean pool.
            z = tf.concat([y, z1, z2], axis=1)
            z = self._fc_128(z)
            return z

    class ActorBranch(keras.layers.Layer):
        """Residual tower pooled two ways (center readout + depthwise) to 128 units."""

        def __init__(self, filters, initializer, activation, layers, **kwargs):
            super().__init__(**kwargs)
            self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
            # Same 13x13 depthwise pooling as CriticBranch.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._fc_128 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)

        def call(self, inputs, training=False, **kwargs):
            x, center = inputs
            for layer in self._residual_block:
                x = layer(x, training=training)
            z1 = (x * center)
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            z = tf.concat([z1, z2], axis=1)
            z = self._fc_128(z)
            return z

    class ResidualModel(keras.Model):
        """Shared conv root feeding independent actor and critic towers."""

        def __init__(self, actions_number, **kwargs):
            super().__init__(**kwargs)
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            # Small-uniform init for the output heads.
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = keras.activations.relu
            self._root = keras.layers.Conv2D(200, 3, padding="same", kernel_initializer=initializer, use_bias=False)
            self._root_norm = keras.layers.BatchNormalization()
            self._root_activation = keras.layers.ReLU()
            # actor
            actor_filters = 200
            actor_layers = 10
            self._actor_branch = ActorBranch(actor_filters, initializer, activation, actor_layers)
            self._action_type = keras.layers.Dense(actions_number, activation="softmax",
                                                   kernel_initializer=initializer_random)
            # critic
            critic_filters = 200
            critic_layers = 10
            self._critic_branch = CriticBranch(critic_filters, initializer, activation, critic_layers)
            # Value head squashed to (-1, 1) by tanh.
            self._baseline = keras.layers.Dense(1, kernel_initializer=initializer_random,
                                                activation=keras.activations.tanh)

        def call(self, inputs, training=False, mask=None):
            features = inputs
            x = features
            x = self._root(x)
            x = self._root_norm(x, training=training)
            x = self._root_activation(x)
            # First input channel used as the ``center`` weighting plane
            # (presumably the current unit's position -- confirm).
            center = features[:, :, :, :1]
            z = (x, center)
            w1 = self._actor_branch(z, training=training)
            action_probs = self._action_type(w1)
            w2 = self._critic_branch(z, training=training)
            baseline = self._baseline(w2)
            return action_probs, baseline

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    model = ResidualModel(actions_shape)
    return model
def actor_critic_residual_with_transfer():
    """Build a residual actor-critic model with three actor heads
    (action type, transfer direction, transfer resource) and a baseline.

    Returns:
        A subclassed ``keras.Model``; ``call`` returns
        ``(action_type_probs, transfer_direction_probs,
        transfer_resource_probs, baseline)``.
    """
    import tensorflow as tf
    import tensorflow.keras as keras

    class ResidualUnit(keras.layers.Layer):
        """3x3 same-padding conv + batch norm with an identity skip."""

        def __init__(self, filters, initializer, activation, **kwargs):
            super().__init__(**kwargs)
            self._filters = filters
            self._activation = activation
            self._conv = keras.layers.Conv2D(filters, 3, kernel_initializer=initializer, padding="same", use_bias=False)
            self._norm = keras.layers.BatchNormalization()

        def call(self, inputs, training=False, **kwargs):
            x = self._conv(inputs)
            x = self._norm(x, training=training)
            # Residual connection: activation(input + transformed input).
            return self._activation(inputs + x)

        def compute_output_shape(self, batch_input_shape):
            batch, x, y, _ = batch_input_shape
            return [batch, x, y, self._filters]

    class ActorBranch(keras.layers.Layer):
        """Residual tower pooled by center readout + 13x13 depthwise conv."""

        def __init__(self, filters, initializer, activation, layers, **kwargs):
            super().__init__(**kwargs)
            self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
            # Assumes 13x13 spatial feature maps -- TODO confirm.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._fc_200 = keras.layers.Dense(200, activation=activation, kernel_initializer=initializer)

        def call(self, inputs, training=False, **kwargs):
            x, center = inputs
            for layer in self._residual_block:
                x = layer(x, training=training)
            # Center-weighted sum over spatial positions.
            z1 = (x * center)
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            # Learned spatial pooling.
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            z = tf.concat([z1, z2], axis=1)
            z = self._fc_200(z)
            return z

    class ResidualModel(keras.Model):
        """Deep shared stem with one shallow ActorBranch per output head."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            filters = 200
            stem_layers = 10
            branch_layers = 1
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = keras.activations.relu
            self._root = keras.layers.Conv2D(filters, 3, padding="same", kernel_initializer=initializer, use_bias=False)
            self._root_norm = keras.layers.BatchNormalization()
            self._root_activation = keras.layers.ReLU()
            self._stem = [ResidualUnit(filters, initializer, activation) for _ in range(stem_layers)]
            # action type: north, east, south, west, idle, build, transfer
            self._action_type_branch = ActorBranch(filters, initializer, activation, branch_layers)
            self._action_type = keras.layers.Dense(7, activation="softmax",
                                                   kernel_initializer=initializer_random)
            # transfer direction: north, east, south, west
            self._transfer_direction_branch = ActorBranch(filters, initializer, activation, branch_layers)
            self._transfer_direction = keras.layers.Dense(4, activation="softmax",
                                                          kernel_initializer=initializer_random)
            # resource to transfer: wood, coal, uranium
            self._transfer_resource_branch = ActorBranch(filters, initializer, activation, branch_layers)
            self._transfer_resource = keras.layers.Dense(3, activation="softmax",
                                                         kernel_initializer=initializer_random)
            # Critic pooling layers applied to the stem output directly.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._baseline = keras.layers.Dense(1, kernel_initializer=initializer_random,
                                                activation=keras.activations.tanh)

        def call(self, inputs, training=False, mask=None):
            features = inputs
            x = features
            x = self._root(x)
            x = self._root_norm(x, training=training)
            x = self._root_activation(x)
            for layer in self._stem:
                x = layer(x, training=training)
            # First input channel acts as the ``center`` weighting plane.
            center = features[:, :, :, :1]
            z = (x, center)
            w1 = self._action_type_branch(z, training=training)
            action_type_probs = self._action_type(w1)
            w3 = self._transfer_direction_branch(z, training=training)
            transfer_direction_probs = self._transfer_direction(w3)
            w4 = self._transfer_resource_branch(z, training=training)
            transfer_resource_probs = self._transfer_resource(w4)
            # Critic input: mean pool + center readout + depthwise pool of the stem.
            shape_x = tf.shape(x)
            y = tf.reshape(x, (shape_x[0], -1, shape_x[-1]))
            y = tf.reduce_mean(y, axis=1)
            o1 = (x * center)
            shape_o1 = tf.shape(o1)
            o1 = tf.reshape(o1, (shape_o1[0], -1, shape_o1[-1]))
            o1 = tf.reduce_sum(o1, axis=1)
            o2 = self._depthwise(x)
            o2 = self._flatten(o2)
            o = tf.concat([o1, o2], axis=1)
            baseline = self._baseline(tf.concat([y, o], axis=1))
            return action_type_probs, transfer_direction_probs, transfer_resource_probs, baseline

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    model = ResidualModel()
    return model
def actor_critic_residual_shrub():
    """Build a two-tower actor-only model: a 4-way movement-direction head
    and a 3-way move/build/idle head, each with its own conv root."""
    import tensorflow as tf
    import tensorflow.keras as keras

    class ResidualUnit(keras.layers.Layer):
        """L2-regularized 3x3 conv + batch norm with an identity skip."""

        def __init__(self, filters, initializer, activation, **kwargs):
            super().__init__(**kwargs)
            self._filters = filters
            self._activation = activation
            self._conv = keras.layers.Conv2D(filters, 3, kernel_initializer=initializer,
                                             kernel_regularizer=keras.regularizers.l2(l2=1.e-5),
                                             padding="same", use_bias=False)
            self._norm = keras.layers.BatchNormalization()

        def call(self, inputs, training=False, **kwargs):
            out = self._norm(self._conv(inputs), training=training)
            return self._activation(inputs + out)

        def compute_output_shape(self, batch_input_shape):
            batch, height, width, _ = batch_input_shape
            return [batch, height, width, self._filters]

    class ActorBranch(keras.layers.Layer):
        """Residual tower pooled by a center readout plus a depthwise conv."""

        def __init__(self, filters, initializer, activation, layers, **kwargs):
            super().__init__(**kwargs)
            self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._fc = keras.layers.Dense(filters, activation=activation, kernel_initializer=initializer)

        def call(self, inputs, training=False, **kwargs):
            feats, center = inputs
            for unit in self._residual_block:
                feats = unit(feats, training=training)
            # Sum of features weighted by the center plane.
            weighted = feats * center
            w_shape = tf.shape(weighted)
            center_sum = tf.reduce_sum(
                tf.reshape(weighted, (w_shape[0], -1, w_shape[-1])), axis=1)
            # Learned spatial pooling, then flatten.
            spatial = self._flatten(self._depthwise(feats))
            return self._fc(tf.concat([center_sum, spatial], axis=1))

    class ResidualModel(keras.Model):
        """Two independent conv roots + towers, one per output head."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = keras.activations.relu
            # movement-direction tower (256 filters)
            self._root1 = keras.layers.Conv2D(256, 3, padding="same", kernel_initializer=initializer,
                                              kernel_regularizer=keras.regularizers.l2(l2=1.e-5),
                                              use_bias=False)
            self._root_norm1 = keras.layers.BatchNormalization()
            self._root_activation1 = keras.layers.ReLU()
            self._movement_direction_branch = ActorBranch(256, initializer, activation, layers=11)
            self._movement_direction = keras.layers.Dense(4, activation="softmax",
                                                          kernel_initializer=initializer_random)
            # move / build-city / idle tower (128 filters)
            self._root2 = keras.layers.Conv2D(128, 3, padding="same", kernel_initializer=initializer,
                                              kernel_regularizer=keras.regularizers.l2(l2=1.e-5),
                                              use_bias=False)
            self._root_norm2 = keras.layers.BatchNormalization()
            self._root_activation2 = keras.layers.ReLU()
            self._move_build_idle_branch = ActorBranch(128, initializer, activation, layers=11)
            self._mbi = keras.layers.Dense(3, activation="softmax", kernel_initializer=initializer_random)

        def call(self, inputs, training=False, mask=None):
            # First input channel is the center weighting plane.
            center = inputs[:, :, :, :1]
            move_feats = self._root_activation1(
                self._root_norm1(self._root1(inputs), training=training))
            movement_direction_probs = self._movement_direction(
                self._movement_direction_branch((move_feats, center), training=training))
            mbi_feats = self._root_activation2(
                self._root_norm2(self._root2(inputs), training=training))
            mbi_probs = self._mbi(
                self._move_build_idle_branch((mbi_feats, center), training=training))
            return movement_direction_probs, mbi_probs

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    return ResidualModel()
def actor_critic_residual_switch_shrub():
    """Build a model with per-action binary "switch" heads plus transfer heads.

    Two stems share one conv root: stem1 feeds six 2-way switches
    (north/east/south/west/build/idle), stem2 feeds the transfer switch,
    transfer direction (4-way) and transfer resource (3-way).

    Returns:
        A subclassed ``keras.Model`` returning the nine softmax outputs.
    """
    import tensorflow as tf
    import tensorflow.keras as keras

    class ResidualUnit(keras.layers.Layer):
        """L2-regularized 3x3 conv + batch norm with an identity skip."""

        def __init__(self, filters, initializer, activation, **kwargs):
            super().__init__(**kwargs)
            self._filters = filters
            self._activation = activation
            self._conv = keras.layers.Conv2D(filters, 3, kernel_initializer=initializer,
                                             kernel_regularizer=keras.regularizers.l2(l2=1.e-5),
                                             padding="same", use_bias=False)
            self._norm = keras.layers.BatchNormalization()

        def call(self, inputs, training=False, **kwargs):
            x = self._conv(inputs)
            x = self._norm(x, training=training)
            # Residual connection.
            return self._activation(inputs + x)

        def compute_output_shape(self, batch_input_shape):
            batch, x, y, _ = batch_input_shape
            return [batch, x, y, self._filters]

    class ActorBranch(keras.layers.Layer):
        """Residual tower pooled by center readout + 13x13 depthwise conv."""

        def __init__(self, filters, initializer, activation, layers, **kwargs):
            super().__init__(**kwargs)
            self._residual_block = [ResidualUnit(filters, initializer, activation) for _ in range(layers)]
            # Assumes 13x13 spatial feature maps -- TODO confirm.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._fc = keras.layers.Dense(filters, activation=activation, kernel_initializer=initializer)

        def call(self, inputs, training=False, **kwargs):
            x, center = inputs
            for layer in self._residual_block:
                x = layer(x, training=training)
            # Center-weighted sum over spatial positions.
            z1 = (x * center)
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            z = tf.concat([z1, z2], axis=1)
            z = self._fc(z)
            return z

    class ResidualModel(keras.Model):
        """Shared root, two stems, nine small output heads."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            filters = 200
            stem_layers1 = 4
            branch_layers1 = 6
            stem_layers2 = 6
            branch_layers2 = 4
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = keras.activations.relu
            self._root = keras.layers.Conv2D(filters, 3, padding="same", kernel_initializer=initializer,
                                             kernel_regularizer=keras.regularizers.l2(l2=1.e-5),
                                             use_bias=False)
            self._root_norm = keras.layers.BatchNormalization()
            self._root_activation = keras.layers.ReLU()
            # stem1
            self._stem1 = [ResidualUnit(filters, initializer, activation) for _ in range(stem_layers1)]
            # movement directions: each head is a 2-way (on/off) switch.
            self._north_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._north = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            self._east_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._east = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            self._south_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._south = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            self._west_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._west = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            # build
            self._build_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._build = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            # idle
            self._idle_branch = ActorBranch(filters, initializer, activation, branch_layers1)
            self._idle = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            # stem2
            self._stem2 = [ResidualUnit(filters, initializer, activation) for _ in range(stem_layers2)]
            # transfer
            self._transfer_branch = ActorBranch(filters, initializer, activation, branch_layers2)
            self._transfer = keras.layers.Dense(2, activation="softmax", kernel_initializer=initializer_random)
            # transfer direction
            self._transfer_direction_branch = ActorBranch(filters, initializer, activation, branch_layers2)
            self._transfer_direction = keras.layers.Dense(4, activation="softmax",
                                                          kernel_initializer=initializer_random)
            # resource to transfer
            self._transfer_resource_branch = ActorBranch(filters, initializer, activation, branch_layers2)
            self._transfer_resource = keras.layers.Dense(3, activation="softmax",
                                                         kernel_initializer=initializer_random)

        def call(self, inputs, training=False, mask=None):
            features = inputs
            x = features
            # First input channel is the center weighting plane.
            center = features[:, :, :, :1]
            x = self._root(x)
            x = self._root_norm(x, training=training)
            x = self._root_activation(x)
            # Movement/build/idle switches come off stem1.
            x1 = x
            for layer in self._stem1:
                x1 = layer(x1, training=training)
            z1 = (x1, center)
            wn = self._north_branch(z1, training=training)
            north_switch = self._north(wn)
            we = self._east_branch(z1, training=training)
            east_switch = self._east(we)
            ws = self._south_branch(z1, training=training)
            south_switch = self._south(ws)
            ww = self._west_branch(z1, training=training)
            west_switch = self._west(ww)
            wb = self._build_branch(z1, training=training)
            build_switch = self._build(wb)
            wi = self._idle_branch(z1, training=training)
            idle_switch = self._idle(wi)
            # Transfer heads come off stem2.
            x2 = x
            for layer in self._stem2:
                x2 = layer(x2, training=training)
            z2 = (x2, center)
            wt = self._transfer_branch(z2, training=training)
            transfer_switch = self._transfer(wt)
            w_td = self._transfer_direction_branch(z2, training=training)
            transfer_direction_probs = self._transfer_direction(w_td)
            w_tr = self._transfer_resource_branch(z2, training=training)
            transfer_resource_probs = self._transfer_resource(w_tr)
            return north_switch, east_switch, south_switch, west_switch, build_switch, idle_switch, \
                transfer_switch, transfer_direction_probs, transfer_resource_probs

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    model = ResidualModel()
    return model
def actor_critic_efficient_six_actions(actions_shape):
    """Build an actor-critic model on an EfficientNetV2-S backbone.

    Args:
        actions_shape: size of the discrete action space for the softmax head.

    Returns:
        A subclassed ``keras.Model``; ``call`` returns ``(probs, baseline)``.
    """
    import copy
    import itertools
    import tensorflow as tf
    import tensorflow.keras as keras
    # import lux_ai.effnetv2_model as eff_model
    import lux_ai.hparams as hparams
    import lux_ai.effnetv2_configs as effnetv2_configs
    import lux_ai.utils as utils
    from lux_ai.effnetv2_model import round_filters, round_repeats, Stem, MBConvBlock, FusedMBConvBlock

    class EfficientModel(keras.Model):
        """EfficientNetV2-S stem + blocks with custom policy/value heads."""

        def __init__(self, actions_n, **kwargs):
            model_name = 'efficientnetv2-s'
            super().__init__(name=model_name, **kwargs)
            # Deep-copy the base config so per-model overrides don't leak.
            cfg = copy.deepcopy(hparams.base_config)
            if model_name:
                cfg.override(effnetv2_configs.get_model_config(model_name))
            self.cfg = cfg
            self._mconfig = cfg.model
            self._stem = Stem(self._mconfig, self._mconfig.blocks_args[0].input_filters)
            self._blocks = []
            block_id = itertools.count(0)
            block_name = lambda: 'blocks_%d' % next(block_id)
            for block_args in self._mconfig.blocks_args:
                assert block_args.num_repeat > 0
                # Update block input and output filters based on depth multiplier.
                input_filters = round_filters(block_args.input_filters, self._mconfig)
                output_filters = round_filters(block_args.output_filters, self._mconfig)
                repeats = round_repeats(block_args.num_repeat,
                                        self._mconfig.depth_coefficient)
                block_args.update(
                    dict(
                        input_filters=input_filters,
                        output_filters=output_filters,
                        num_repeat=repeats))
                # The first block needs to take care of stride and filter size increase.
                conv_block = {0: MBConvBlock, 1: FusedMBConvBlock}[block_args.conv_type]
                self._blocks.append(
                    conv_block(block_args, self._mconfig, name=block_name()))
                if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
                    # pylint: disable=protected-access
                    block_args.input_filters = block_args.output_filters
                    block_args.strides = 1
                    # pylint: enable=protected-access
                    for _ in range(block_args.num_repeat - 1):
                        self._blocks.append(
                            conv_block(block_args, self._mconfig, name=block_name()))
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = utils.get_act_fn(self._mconfig.act_fn)
            # Policy/value heads mirror the residual models above.
            # Assumes 13x13 spatial feature maps -- TODO confirm.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._workers_probs0 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)
            self._workers_probs1 = keras.layers.Dense(actions_n, activation="softmax",
                                                      kernel_initializer=initializer_random)
            self._baseline = keras.layers.Dense(1, kernel_initializer=initializer_random,
                                                activation=keras.activations.tanh)

        def call(self, inputs, training=False, mask=None):
            outputs = self._stem(inputs, training)
            for idx, block in enumerate(self._blocks):
                # Stochastic depth: survival probability decays with depth.
                survival_prob = self._mconfig.survival_prob
                if survival_prob:
                    drop_rate = 1.0 - survival_prob
                    survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
                    # survival_prob = 1.0
                outputs = block(outputs, training=training, survival_prob=survival_prob)
            x = outputs
            # y: global mean over spatial positions (critic input only).
            shape_x = tf.shape(x)
            y = tf.reshape(x, (shape_x[0], -1, shape_x[-1]))
            y = tf.reduce_mean(y, axis=1)
            # z1: features summed after weighting by the first input channel.
            z1 = (x * inputs[:, :, :, :1])
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            z = tf.concat([z1, z2], axis=1)
            w = self._workers_probs0(z)
            w = self._workers_probs1(w)
            probs = w
            baseline = self._baseline(tf.concat([y, z], axis=1))
            return probs, baseline

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    model = EfficientModel(actions_shape)
    return model
def actor_critic_efficient_shrub(actions_shape):
    """Build an EfficientNetV2-S actor-critic with four softmax heads.

    Args:
        actions_shape: nested sequence of head sizes; indexed as
            ``actions_shape[i][0]`` below -- confirm layout against callers.

    Returns:
        A subclassed ``keras.Model``; ``call`` returns
        ``(probs0, probs1, probs2, probs3, baseline)``.
    """
    import copy
    import itertools
    import tensorflow as tf
    import tensorflow.keras as keras
    # import lux_ai.effnetv2_model as eff_model
    import lux_ai.hparams as hparams
    import lux_ai.effnetv2_configs as effnetv2_configs
    import lux_ai.utils as utils
    from lux_ai.effnetv2_model import round_filters, round_repeats, Stem, MBConvBlock, FusedMBConvBlock

    class EfficientModel(keras.Model):
        """EfficientNetV2-S backbone with shared trunk and four policy heads."""

        def __init__(self, actions_number, **kwargs):
            model_name = 'efficientnetv2-s'
            super().__init__(name=model_name, **kwargs)
            # Deep-copy the base config so per-model overrides don't leak.
            cfg = copy.deepcopy(hparams.base_config)
            if model_name:
                cfg.override(effnetv2_configs.get_model_config(model_name))
            self.cfg = cfg
            self._mconfig = cfg.model
            self._stem = Stem(self._mconfig, self._mconfig.blocks_args[0].input_filters)
            self._blocks = []
            block_id = itertools.count(0)
            block_name = lambda: 'blocks_%d' % next(block_id)
            for block_args in self._mconfig.blocks_args:
                assert block_args.num_repeat > 0
                # Update block input and output filters based on depth multiplier.
                input_filters = round_filters(block_args.input_filters, self._mconfig)
                output_filters = round_filters(block_args.output_filters, self._mconfig)
                repeats = round_repeats(block_args.num_repeat,
                                        self._mconfig.depth_coefficient)
                block_args.update(
                    dict(
                        input_filters=input_filters,
                        output_filters=output_filters,
                        num_repeat=repeats))
                # The first block needs to take care of stride and filter size increase.
                conv_block = {0: MBConvBlock, 1: FusedMBConvBlock}[block_args.conv_type]
                self._blocks.append(
                    conv_block(block_args, self._mconfig, name=block_name()))
                if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
                    # pylint: disable=protected-access
                    block_args.input_filters = block_args.output_filters
                    block_args.strides = 1
                    # pylint: enable=protected-access
                    for _ in range(block_args.num_repeat - 1):
                        self._blocks.append(
                            conv_block(block_args, self._mconfig, name=block_name()))
            initializer = keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')
            initializer_random = keras.initializers.random_uniform(minval=-0.03, maxval=0.03)
            activation = utils.get_act_fn(self._mconfig.act_fn)
            # Assumes 13x13 spatial feature maps -- TODO confirm.
            self._depthwise = keras.layers.DepthwiseConv2D(13)
            self._flatten = keras.layers.Flatten()
            self._workers_probs0 = keras.layers.Dense(128, activation=activation, kernel_initializer=initializer)
            # action type
            self._workers_probs1_0 = keras.layers.Dense(actions_number[0][0], activation="softmax",
                                                        kernel_initializer=initializer_random)
            # movement direction
            self._workers_probs1_1 = keras.layers.Dense(actions_number[1][0], activation="softmax",
                                                        kernel_initializer=initializer_random)
            # transfer direction
            # NOTE(review): reuses actions_number[1][0] (same as movement
            # direction) -- plausibly intentional since both are 4-way, but
            # verify the actions_shape layout.
            self._workers_probs1_2 = keras.layers.Dense(actions_number[1][0], activation="softmax",
                                                        kernel_initializer=initializer_random)
            # resource to transfer
            self._workers_probs1_3 = keras.layers.Dense(actions_number[2][0], activation="softmax",
                                                        kernel_initializer=initializer_random)
            self._baseline = keras.layers.Dense(1, kernel_initializer=initializer_random,
                                                activation=keras.activations.tanh)

        def call(self, inputs, training=False, mask=None):
            outputs = self._stem(inputs, training)
            for idx, block in enumerate(self._blocks):
                # Stochastic depth disabled here (cf. the six-actions variant).
                # survival_prob = self._mconfig.survival_prob
                # if survival_prob:
                #     drop_rate = 1.0 - survival_prob
                #     survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
                survival_prob = 1.0
                outputs = block(outputs, training=training, survival_prob=survival_prob)
            x = outputs
            # y: global mean over spatial positions (critic input only).
            shape_x = tf.shape(x)
            y = tf.reshape(x, (shape_x[0], -1, shape_x[-1]))
            y = tf.reduce_mean(y, axis=1)
            # z1: features summed after weighting by the first input channel.
            z1 = (x * inputs[:, :, :, :1])
            shape_z = tf.shape(z1)
            z1 = tf.reshape(z1, (shape_z[0], -1, shape_z[-1]))
            z1 = tf.reduce_sum(z1, axis=1)
            z2 = self._depthwise(x)
            z2 = self._flatten(z2)
            z = tf.concat([z1, z2], axis=1)
            # One shared hidden layer feeds all four heads.
            w = self._workers_probs0(z)
            probs0 = self._workers_probs1_0(w)
            probs1 = self._workers_probs1_1(w)
            probs2 = self._workers_probs1_2(w)
            probs3 = self._workers_probs1_3(w)
            baseline = self._baseline(tf.concat([y, z], axis=1))
            return probs0, probs1, probs2, probs3, baseline

        def get_config(self):
            # Keras config serialization is not implemented.
            pass

    model = EfficientModel(actions_shape)
    # model = eff_model.get_model("efficientnetv2-s", weights=None)
    return model
| 43.907692
| 120
| 0.593849
| 4,043
| 37,102
| 5.186743
| 0.064556
| 0.049833
| 0.065427
| 0.045398
| 0.875203
| 0.851264
| 0.841869
| 0.835575
| 0.788269
| 0.767811
| 0
| 0.023234
| 0.309768
| 37,102
| 844
| 121
| 43.959716
| 0.795619
| 0.052504
| 0
| 0.766234
| 0
| 0
| 0.011253
| 0
| 0
| 0
| 0
| 0
| 0.003247
| 1
| 0.086039
| false
| 0.011364
| 0.042208
| 0
| 0.202922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f2fe1e8d5a3a54c3c011686c3d2c1d97c948541d
| 12,002
|
py
|
Python
|
2018/day_15/python/day15.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
2018/day_15/python/day15.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | 2
|
2021-06-02T00:41:38.000Z
|
2021-11-30T10:05:29.000Z
|
2018/day_15/python/day15.py
|
autoreleasefool/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
from aoc import AOC
import queue
aoc = AOC(year=2018, day=15)
data = aoc.load()

# Battlefield state, keyed by (x, y) cells:
#   walls    -- impassable cells
#   caverns  -- walkable cells (units stand on caverns)
#   goblins / elves -- unit-id -> stat dict
#   beings   -- cell -> unit id occupying it
walls = set()
caverns = set()
goblins = {}
elves = {}
beings = {}
total_elves = 0
total_goblins = 0
max_width = 0
max_height = 0
for y, row in enumerate(data.lines()):
    row = row.strip()
    if y == 0:
        max_width = len(row)
    max_height += 1
    for x, ch in enumerate(row):
        cell = (x, y)
        if ch == "#":
            walls.add(cell)
        elif ch == ".":
            caverns.add(cell)
        elif ch == "G":
            gob_id = "G{}".format(total_goblins)
            goblins[gob_id] = {"id": gob_id, "attack": 3, "hp": 200, "x": x, "y": y}
            beings[cell] = gob_id
            total_goblins += 1
            caverns.add(cell)
        elif ch == "E":
            elf_id = "E{}".format(total_elves)
            elves[elf_id] = {"id": elf_id, "attack": 3, "hp": 200, "x": x, "y": y}
            beings[cell] = elf_id
            total_elves += 1
            caverns.add(cell)
def reading_order(c1, c2):
    """Return whichever of two (x, y) cells comes first in reading order
    (top-to-bottom, then left-to-right)."""
    if c1[1] != c2[1]:
        return c1 if c1[1] < c2[1] else c2
    return c1 if c1[0] < c2[0] else c2
def neighbors(n):
    """Return non-wall orthogonal neighbors of cell ``n``, in reading order
    (up, left, right, down), clipped to the map bounds."""
    x, y = n
    candidates = []
    if y > 0:
        candidates.append((x, y - 1))
    if x > 0:
        candidates.append((x - 1, y))
    if x < max_width - 1:
        candidates.append((x + 1, y))
    if y < max_height - 1:
        candidates.append((x, y + 1))
    return [c for c in candidates if c not in walls]
def bfs(cell, targets):
    """Shortest-path search from ``cell`` toward the nearest cell in ``targets``.

    The queue is ordered by (distance, y, x), so equal-distance targets are
    reached in reading order.  Returns the first step of the path to the
    chosen target, or ``None`` when no target is reachable.
    """
    targets = set(targets)
    q = queue.PriorityQueue()
    dist = {cell: 0}  # best known distance from the start cell
    prev = {}  # predecessor map used to rebuild the path

    q.put((0, 0, 0, cell))

    def build_path(cell):
        # Walk the predecessor chain back to the start, then reverse it.
        path = [cell]
        p = prev[cell]
        while p is not None:
            path.append(p)
            p = prev[p] if p in prev else None
        path.reverse()
        return path

    while not q.empty():
        n = q.get()[3]
        for ne in neighbors(n):
            if ne in prev:
                continue  # already reached via an equal-or-better path
            # Only open cavern cells without a unit on them are passable.
            if ne in caverns and ne not in beings:
                alt = dist[n] + 1
                if ne not in dist or alt < dist[ne]:
                    dist[ne] = alt
                    prev[ne] = n
                    if ne in targets:
                        # path[0] is the start cell, so [1] is the first step.
                        return build_path(ne)[1]
                    q.put((alt, ne[1], ne[0], ne))
    return None
def remove(removed):
    """Delete a dead unit from its faction dict and the position index."""
    faction = goblins if removed["id"][0] == "G" else elves
    del faction[removed["id"]]
    del beings[(removed["x"], removed["y"])]
def get_target(cell, friendly_type):
    """Pick the adjacent enemy with the lowest HP, ties broken by reading order.

    ``friendly_type`` is "E" or "G"; targets are the opposite faction.
    Returns the enemy's stat dict, or ``None`` if no enemy is adjacent.
    """
    enemies = []
    # neighbors() yields cells in reading order, so ``enemies`` is too.
    for n in neighbors(cell):
        if friendly_type == "E" and n in beings and beings[n][0] == "G":
            enemies.append((goblins[beings[n]], n))
        elif friendly_type == "G" and n in beings and beings[n][0] == "E":
            enemies.append((elves[beings[n]], n))
    target = None
    for e in enemies:
        if target is None:
            target = e
        elif e[0]["hp"] < target[0]["hp"]:
            target = e
        else:
            # NOTE(review): this branch also runs when ``e`` has strictly MORE
            # hp than ``target``; it is harmless only because ``enemies`` is
            # already in reading order, so reading_order() keeps ``target``.
            target_order = reading_order(target[1], e[1])
            if target_order != target[1]:
                target = e
    return target[0] if target else None
def get_target_spaces(enemies):
    """Collect every non-wall cell adjacent to any enemy unit
    (candidate movement destinations; may contain duplicates)."""
    spaces = []
    for enemy in enemies.values():
        spaces.extend(neighbors((enemy["x"], enemy["y"])))
    return spaces
# Part 1: simulate rounds until one faction is wiped out.
fighting = True
current_round = 0
# print_status()
while fighting:
    # Units act in reading order of their positions at the start of the round.
    order = sorted(list(beings.keys()), key=lambda x: (x[1], x[0]))
    for being_position in order:
        if being_position not in beings:
            continue  # this unit died earlier in the round
        being_id = beings[being_position]
        being_type = being_id[0]
        if (being_type == "E" and not goblins) or (being_type == "G" and not elves):
            # No enemies remain: combat ends mid-round.
            fighting = False
            break
        being = elves[being_id] if being_type == "E" else goblins[being_id]
        # pylint: disable=unsupported-assignment-operation, unsubscriptable-object
        current_target = get_target(being_position, being_type)
        if current_target is not None:
            # Already adjacent to an enemy: attack without moving.
            current_target["hp"] -= being["attack"]
            if current_target["hp"] <= 0:
                remove(current_target)
        else:
            # Otherwise step toward the nearest open cell next to an enemy.
            open_spaces = get_target_spaces(elves if being_type == "G" else goblins)
            if open_spaces:
                next_space = bfs(being_position, open_spaces)
                if next_space:
                    del beings[being_position]
                    beings[next_space] = being_id
                    if being_type == "E":
                        elves[being_id]["x"] = next_space[0]
                        elves[being_id]["y"] = next_space[1]
                    else:
                        goblins[being_id]["x"] = next_space[0]
                        goblins[being_id]["y"] = next_space[1]
                if next_space:
                    # After moving, attack if now adjacent to an enemy.
                    current_target = get_target(next_space, being_type)
                    if current_target:
                        current_target["hp"] -= being["attack"]
                        if current_target["hp"] <= 0:
                            remove(current_target)
    current_round += 1
survivors = elves if elves else goblins
total_health = sum(survivors[s]["hp"] for s in survivors)
# current_round was incremented for the interrupted final round, hence -1.
aoc.p1((current_round - 1) * total_health)
## Part 2
def run_sim(starting_attack):
# Re-parse the map from scratch for this simulation; identical to the
# part-1 parse except elves get ``starting_attack`` instead of 3.
walls = set()
caverns = set()
goblins = {}
elves = {}
beings = {}
total_elves = 0
total_goblins = 0
max_width = 0
max_height = 0
for y, line in enumerate(data.lines()):
    line = line.strip()
    for x, c in enumerate(line):
        cell = (x, y)
        if c == "#":
            walls.add(cell)
        elif c == ".":
            caverns.add(cell)
        elif c == "G":
            gob_id = "G{}".format(total_goblins)
            goblins[gob_id] = {"id": gob_id, "attack": 3, "hp": 200, "x": x, "y": y}
            beings[cell] = gob_id
            total_goblins += 1
            caverns.add(cell)
        elif c == "E":
            elf_id = "E{}".format(total_elves)
            elves[elf_id] = {
                "id": elf_id,
                "attack": starting_attack,
                "hp": 200,
                "x": x,
                "y": y,
            }
            beings[cell] = elf_id
            total_elves += 1
            caverns.add(cell)
        if y == 0:
            max_width += 1
    max_height += 1
def reading_order(c1, c2):
    """Return whichever of two (x, y) cells comes first in reading order
    (top-to-bottom, then left-to-right)."""
    if c1[1] != c2[1]:
        return c1 if c1[1] < c2[1] else c2
    return c1 if c1[0] < c2[0] else c2
def neighbors(n):
    """Return non-wall orthogonal neighbors of cell ``n``, in reading order
    (up, left, right, down), clipped to the map bounds."""
    x, y = n
    candidates = []
    if y > 0:
        candidates.append((x, y - 1))
    if x > 0:
        candidates.append((x - 1, y))
    if x < max_width - 1:
        candidates.append((x + 1, y))
    if y < max_height - 1:
        candidates.append((x, y + 1))
    return [c for c in candidates if c not in walls]
def bfs(cell, targets):
    """Shortest-path search from ``cell`` toward the nearest cell in ``targets``.

    The queue is ordered by (distance, y, x), so equal-distance targets are
    reached in reading order.  Returns the first step of the path to the
    chosen target, or ``None`` when no target is reachable.
    """
    targets = set(targets)
    q = queue.PriorityQueue()
    dist = {cell: 0}  # best known distance from the start cell
    prev = {}  # predecessor map used to rebuild the path

    q.put((0, 0, 0, cell))

    def build_path(cell):
        # Walk the predecessor chain back to the start, then reverse it.
        path = [cell]
        p = prev[cell]
        while p is not None:
            path.append(p)
            p = prev[p] if p in prev else None
        path.reverse()
        return path

    while not q.empty():
        n = q.get()[3]
        for ne in neighbors(n):
            if ne in prev:
                continue  # already reached via an equal-or-better path
            # Only open cavern cells without a unit on them are passable.
            if ne in caverns and ne not in beings:
                alt = dist[n] + 1
                if ne not in dist or alt < dist[ne]:
                    dist[ne] = alt
                    prev[ne] = n
                    if ne in targets:
                        # path[0] is the start cell, so [1] is the first step.
                        return build_path(ne)[1]
                    q.put((alt, ne[1], ne[0], ne))
    return None
def remove(being):
    """Delete a dead unit from its faction dict and the position index."""
    faction = goblins if being["id"][0] == "G" else elves
    del faction[being["id"]]
    del beings[(being["x"], being["y"])]
def get_target(cell, being_type):
enemies = []
for n in neighbors(cell):
if being_type == "E" and n in beings and beings[n][0] == "G":
enemies.append((goblins[beings[n]], n))
elif being_type == "G" and n in beings and beings[n][0] == "E":
enemies.append((elves[beings[n]], n))
target = None
for e in enemies:
if target is None:
target = e
elif e[0]["hp"] < target[0]["hp"]:
target = e
else:
target_order = reading_order(target[1], e[1])
if target_order != target[1]:
target = e
return target[0] if target else None
def get_target_spaces(enemies):
spaces = []
for enemy_id in enemies:
enemy = enemies[enemy_id]
enemy_cell = (enemy["x"], enemy["y"])
adjacent = neighbors(enemy_cell)
spaces.extend(adjacent)
return spaces
fighting = True
current_round = 0
while fighting:
order = sorted(list(beings.keys()), key=lambda x: (x[1], x[0]))
for being_position in order:
if being_position not in beings:
continue
being_id = beings[being_position]
being_type = being_id[0]
if (being_type == "E" and not goblins) or (being_type == "G" and not elves):
fighting = False
break
being = elves[being_id] if being_type == "E" else goblins[being_id]
# pylint: disable=unsupported-assignment-operation, unsubscriptable-object
target = get_target(being_position, being_type)
if target is not None:
target["hp"] -= being["attack"]
if target["hp"] <= 0:
if target["id"][0] == "E":
return False
remove(target)
else:
open_spaces = get_target_spaces(elves if being_type == "G" else goblins)
if open_spaces:
next_space = bfs(being_position, open_spaces)
if next_space:
del beings[being_position]
beings[next_space] = being_id
if being_type == "E":
elves[being_id]["x"] = next_space[0]
elves[being_id]["y"] = next_space[1]
else:
goblins[being_id]["x"] = next_space[0]
goblins[being_id]["y"] = next_space[1]
if next_space:
target = get_target(next_space, being_type)
if target:
target["hp"] -= being["attack"]
if target["hp"] <= 0:
if target["id"][0] == "E":
return False
remove(target)
current_round += 1
survivors = elves if elves else goblins
total_health = sum(survivors[s]["hp"] for s in survivors)
return True
elf_died = True
attack = 4
while elf_died:
elf_died = not run_sim(attack)
if not elf_died:
aoc.p2(attack)
break
attack += 1
| 29.781638
| 88
| 0.469588
| 1,495
| 12,002
| 3.646823
| 0.086288
| 0.026963
| 0.018158
| 0.013206
| 0.917828
| 0.893434
| 0.889398
| 0.883162
| 0.820249
| 0.820249
| 0
| 0.025637
| 0.411765
| 12,002
| 402
| 89
| 29.855721
| 0.746601
| 0.013914
| 0
| 0.840708
| 0
| 0
| 0.01471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044248
| false
| 0
| 0.0059
| 0
| 0.112094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8418d9c2f67bb04f7a5ed5a79b6630a6b2a14d40
| 340
|
py
|
Python
|
solutions/drop_columns.py
|
SdgJlbl/ml-workshop
|
f9bf4c23d5d02dde16e4fdf80e728447adf71b1f
|
[
"MIT"
] | 5
|
2018-03-27T13:30:48.000Z
|
2018-03-29T21:42:27.000Z
|
solutions/drop_columns.py
|
SdgJlbl/ml-workshop
|
f9bf4c23d5d02dde16e4fdf80e728447adf71b1f
|
[
"MIT"
] | null | null | null |
solutions/drop_columns.py
|
SdgJlbl/ml-workshop
|
f9bf4c23d5d02dde16e4fdf80e728447adf71b1f
|
[
"MIT"
] | 2
|
2018-03-27T13:31:26.000Z
|
2018-10-08T13:08:59.000Z
|
df_ml = df.drop(['First_pokemon', 'Second_pokemon', 'Winner',
'Name', 'Name_opponent',
'Generation', 'Generation_opponent'], axis=1)
# df_ml = df.drop(columns=['First_pokemon', 'Second_pokemon', 'Winner',
# 'Name', 'Name_opponent',
# 'Generation', 'Generation_opponent'])
| 48.571429
| 71
| 0.564706
| 33
| 340
| 5.515152
| 0.393939
| 0.043956
| 0.065934
| 0.10989
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0
| 0.004
| 0.264706
| 340
| 6
| 72
| 56.666667
| 0.724
| 0.485294
| 0
| 0
| 0
| 0
| 0.461988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffcc47585775975ffce42d7b1ff2e5dfac6f312a
| 540
|
py
|
Python
|
test/test_function.py
|
AICyberTeam/retrievalSystem
|
3a6b4a3f38ed179b529d3ac0cf0aad9aa6b98903
|
[
"MIT"
] | 51
|
2021-11-07T03:14:12.000Z
|
2022-03-31T11:47:56.000Z
|
test/test_function.py
|
AICyberTeam/retrievalSystem
|
3a6b4a3f38ed179b529d3ac0cf0aad9aa6b98903
|
[
"MIT"
] | null | null | null |
test/test_function.py
|
AICyberTeam/retrievalSystem
|
3a6b4a3f38ed179b529d3ac0cf0aad9aa6b98903
|
[
"MIT"
] | 13
|
2021-11-07T03:14:14.000Z
|
2022-03-20T11:14:03.000Z
|
import numpy as np
data = [
{
"image_id": 11,
"image_path": "../data/test_data/images/00013.jpg",
"user_id": 1,
"privilege": 1
},
{
"image_id": 33,
"image_path": "../data/test_data/images/00013.jpg",
"user_id": 1,
"privilege": 1
},
{
"image_id": 32,
"image_path": "../data/test_data/images/00013.jpg",
"user_id": 2,
"privilege": 1
}
]
print(data)
| 22.5
| 63
| 0.412963
| 55
| 540
| 3.836364
| 0.363636
| 0.099526
| 0.184834
| 0.241706
| 0.753555
| 0.753555
| 0.753555
| 0.753555
| 0.753555
| 0.753555
| 0
| 0.087662
| 0.42963
| 540
| 24
| 64
| 22.5
| 0.597403
| 0
| 0
| 0.363636
| 0
| 0
| 0.377079
| 0.18854
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0.045455
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
08385bf23f12ac0d08559de32f6ec5af4d0c2485
| 10,308
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowClnsInterface/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowClnsInterface/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowClnsInterface/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"interfaces": {
"GigabitEthernet1": {
"line_protocol": "up",
"status": "up",
"clns_protocol_processing": False,
},
"GigabitEthernet2": {
"line_protocol": "up",
"status": "up",
"checksum_enabled": True,
"mtu": 1497,
"encapsulation": "SAP",
"erpdus_enabled": True,
"min_interval_msec": 10,
"clns_fast_switching": True,
"clns_sse_switching": False,
"dec_compatibility_mode": "OFF",
"next_esh_ish_in": 55,
"routing_protocol": {
"IS-IS": {
"process_id": {
"test": {
"level_type": "level-1-2",
"interface_number": "0x1",
"local_circuit_id": "0x1",
"neighbor_extended_local_circuit_id": "0x0",
"level-1": {
"metric": 10,
"circuit_id": "R2.01",
"dr_id": "R2.01",
"ipv6_metric": 10,
},
"priority": {
"level-1": {"priority": 64},
"level-2": {"priority": 64},
},
"adjacencies": {
"level-1": {"number_of_active_adjancies": 1},
"level-2": {"number_of_active_adjancies": 0},
},
"level-2": {
"metric": 10,
"circuit_id": "R2.01",
"dr_id": "0000.0000.0000.00",
"ipv6_metric": 10,
},
"hello_interval": {
"level-1": {"next_is_is_lan_hello_in_ms": 432},
"level-2": {"next_is_is_lan_hello_in": 4},
},
}
}
}
},
},
"GigabitEthernet3": {
"line_protocol": "up",
"status": "up",
"checksum_enabled": True,
"mtu": 1497,
"encapsulation": "SAP",
"erpdus_enabled": True,
"min_interval_msec": 10,
"clns_fast_switching": True,
"clns_sse_switching": False,
"dec_compatibility_mode": "OFF",
"next_esh_ish_in": 15,
"routing_protocol": {
"IS-IS": {
"process_id": {
"test": {
"level_type": "level-1-2",
"interface_number": "0x2",
"local_circuit_id": "0x2",
"neighbor_extended_local_circuit_id": "0x0",
"level-1": {
"metric": 10,
"circuit_id": "R2.02",
"dr_id": "0000.0000.0000.00",
"ipv6_metric": 10,
},
"priority": {
"level-1": {"priority": 64},
"level-2": {"priority": 64},
},
"adjacencies": {
"level-1": {"number_of_active_adjancies": 0},
"level-2": {"number_of_active_adjancies": 0},
},
"level-2": {
"metric": 10,
"circuit_id": "R2.02",
"dr_id": "0000.0000.0000.00",
"ipv6_metric": 10,
},
"hello_interval": {
"level-1": {"next_is_is_lan_hello_in": 5},
"level-2": {"next_is_is_lan_hello_in": 6},
},
}
}
}
},
},
"GigabitEthernet4": {
"line_protocol": "up",
"status": "up",
"checksum_enabled": True,
"mtu": 1497,
"encapsulation": "SAP",
"erpdus_enabled": True,
"min_interval_msec": 10,
"clns_fast_switching": True,
"clns_sse_switching": False,
"dec_compatibility_mode": "OFF",
"next_esh_ish_in": 32,
"routing_protocol": {
"IS-IS": {
"process_id": {
"VRF1": {
"level_type": "level-1-2",
"interface_number": "0x1",
"local_circuit_id": "0x1",
"neighbor_extended_local_circuit_id": "0x0",
"level-1": {
"metric": 10,
"circuit_id": "R2.01",
"dr_id": "0000.0000.0000.00",
"ipv6_metric": 10,
},
"priority": {
"level-1": {"priority": 64},
"level-2": {"priority": 64},
},
"adjacencies": {
"level-1": {"number_of_active_adjancies": 0},
"level-2": {"number_of_active_adjancies": 0},
},
"level-2": {
"metric": 10,
"circuit_id": "R2.01",
"dr_id": "0000.0000.0000.00",
"ipv6_metric": 10,
},
"hello_interval": {
"level-1": {"next_is_is_lan_hello_in": 2},
"level-2": {"next_is_is_lan_hello_in": 7},
},
}
}
}
},
},
"Loopback0": {
"line_protocol": "up",
"status": "up",
"checksum_enabled": True,
"mtu": 1514,
"encapsulation": "LOOPBACK",
"erpdus_enabled": True,
"min_interval_msec": 10,
"clns_fast_switching": False,
"clns_sse_switching": False,
"dec_compatibility_mode": "OFF",
"next_esh_ish_in": 36,
"routing_protocol": {
"IS-IS": {
"process_id": {
"test": {
"level_type": "level-1-2",
"interface_number": "0x0",
"local_circuit_id": "0x7",
"neighbor_extended_local_circuit_id": "0x0",
"level-1": {
"metric": 10,
"circuit_id": "R2.07",
"ipv6_metric": 10,
},
"priority": {
"level-1": {"priority": 64},
"level-2": {"priority": 64},
},
"adjacencies": {
"level-1": {"number_of_active_adjancies": 0},
"level-2": {"number_of_active_adjancies": 0},
},
"level-2": {
"metric": 10,
"circuit_id": "R2.07",
"ipv6_metric": 10,
},
"hello_interval": {"next_is_is_hello_in": 0},
"if_state": "Down",
}
}
}
},
},
"Loopback1": {
"line_protocol": "up",
"status": "up",
"checksum_enabled": True,
"mtu": 1514,
"encapsulation": "LOOPBACK",
"erpdus_enabled": True,
"min_interval_msec": 10,
"clns_fast_switching": False,
"clns_sse_switching": False,
"dec_compatibility_mode": "OFF",
"next_esh_ish_in": 49,
"routing_protocol": {
"IS-IS": {
"process_id": {
"VRF1": {
"level_type": "level-1-2",
"interface_number": "0x0",
"local_circuit_id": "0x8",
"neighbor_extended_local_circuit_id": "0x0",
"level-1": {
"metric": 10,
"circuit_id": "R2.08",
"ipv6_metric": 10,
},
"priority": {
"level-1": {"priority": 64},
"level-2": {"priority": 64},
},
"adjacencies": {
"level-1": {"number_of_active_adjancies": 0},
"level-2": {"number_of_active_adjancies": 0},
},
"level-2": {
"metric": 10,
"circuit_id": "R2.08",
"ipv6_metric": 10,
},
"hello_interval": {"next_is_is_hello_in": 0},
"if_state": "Down",
}
}
}
},
},
}
}
| 41.732794
| 79
| 0.307916
| 670
| 10,308
| 4.4
| 0.141791
| 0.046811
| 0.04749
| 0.057666
| 0.937585
| 0.930122
| 0.930122
| 0.930122
| 0.905699
| 0.902985
| 0
| 0.072474
| 0.575669
| 10,308
| 246
| 80
| 41.902439
| 0.601509
| 0
| 0
| 0.686992
| 0
| 0
| 0.293364
| 0.068393
| 0
| 0
| 0.004366
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f23fc4ebd66e9eeaf982e3b50daff2102106af32
| 5,880
|
py
|
Python
|
stencils/mpi125pt.py
|
OscarAntepara/bricklib
|
344ae3088fe8f40b80254e9ecd90391349a06255
|
[
"MIT"
] | 3
|
2020-10-31T00:39:25.000Z
|
2021-03-02T14:34:17.000Z
|
stencils/mpi125pt.py
|
OscarAntepara/bricklib
|
344ae3088fe8f40b80254e9ecd90391349a06255
|
[
"MIT"
] | 1
|
2021-09-07T14:05:26.000Z
|
2021-09-10T20:11:42.000Z
|
stencils/mpi125pt.py
|
OscarAntepara/bricklib
|
344ae3088fe8f40b80254e9ecd90391349a06255
|
[
"MIT"
] | 3
|
2020-10-31T02:46:58.000Z
|
2022-03-03T21:44:47.000Z
|
from st.expr import Index, ConstRef
from st.grid import Grid
# Declare indices
i = Index(0)
j = Index(1)
k = Index(2)
# Declare grid
input = Grid("in", 3)
output = Grid("out", 3)
# Symmetries of 125pt stencil imply 10 constants, permuted +/-
# 0 0 0 - 1 of these
a0 = ConstRef("MPI_C0")
# 0 0 1 - 6 of these
a1 = ConstRef("MPI_C1")
# 0 0 2 - 6 of these
a2 = ConstRef("MPI_C2")
# 0 1 1 - 12 of these
a3 = ConstRef("MPI_C3")
# 0 1 2 - 24 of these
a4 = ConstRef("MPI_C4")
# 0 2 2 - 12 of these
a5 = ConstRef("MPI_C5")
# 1 1 1 - 8 of these
a6 = ConstRef("MPI_C6")
# 1 1 2 - 24 of these
a7 = ConstRef("MPI_C7")
# 1 2 2 - 24 of these
a8 = ConstRef("MPI_C8")
# 2 2 2 - 8 of these
a9 = ConstRef("MPI_C9")
# Express computation
# output[i, j, k] is assumed
calc = \
a0 * input(i, j, k) + \
a1 * input(i + 1, j, k) + \
a1 * input(i - 1, j, k) + \
a1 * input(i, j + 1, k) + \
a1 * input(i, j - 1, k) + \
a1 * input(i, j, k + 1) + \
a1 * input(i, j, k - 1) + \
a2 * input(i + 2, j, k) + \
a2 * input(i - 2, j, k) + \
a2 * input(i, j + 2, k) + \
a2 * input(i, j - 2, k) + \
a2 * input(i, j, k + 2) + \
a2 * input(i, j, k - 2) + \
a3 * input(i + 1, j + 1, k) + \
a3 * input(i - 1, j + 1, k) + \
a3 * input(i + 1, j - 1, k) + \
a3 * input(i - 1, j - 1, k) + \
a3 * input(i + 1, j, k + 1) + \
a3 * input(i - 1, j, k + 1) + \
a3 * input(i + 1, j, k - 1) + \
a3 * input(i - 1, j, k - 1) + \
a3 * input(i, j + 1, k + 1) + \
a3 * input(i, j - 1, k + 1) + \
a3 * input(i, j + 1, k - 1) + \
a3 * input(i, j - 1, k - 1) + \
a4 * input(i + 1, j + 2, k) + \
a4 * input(i - 1, j + 2, k) + \
a4 * input(i + 1, j - 2, k) + \
a4 * input(i - 1, j - 2, k) + \
a4 * input(i + 1, j, k + 2) + \
a4 * input(i - 1, j, k + 2) + \
a4 * input(i + 1, j, k - 2) + \
a4 * input(i - 1, j, k - 2) + \
a4 * input(i, j + 1, k + 2) + \
a4 * input(i, j - 1, k + 2) + \
a4 * input(i, j + 1, k - 2) + \
a4 * input(i, j - 1, k - 2) + \
a4 * input(i + 2, j + 1, k) + \
a4 * input(i - 2, j + 1, k) + \
a4 * input(i + 2, j - 1, k) + \
a4 * input(i - 2, j - 1, k) + \
a4 * input(i + 2, j, k + 1) + \
a4 * input(i - 2, j, k + 1) + \
a4 * input(i + 2, j, k - 1) + \
a4 * input(i - 2, j, k - 1) + \
a4 * input(i, j + 2, k + 1) + \
a4 * input(i, j - 2, k + 1) + \
a4 * input(i, j + 2, k - 1) + \
a4 * input(i, j - 2, k - 1) + \
a5 * input(i + 2, j + 2, k) + \
a5 * input(i - 2, j + 2, k) + \
a5 * input(i + 2, j - 2, k) + \
a5 * input(i - 2, j - 2, k) + \
a5 * input(i + 2, j, k + 2) + \
a5 * input(i - 2, j, k + 2) + \
a5 * input(i + 2, j, k - 2) + \
a5 * input(i - 2, j, k - 2) + \
a5 * input(i, j + 2, k + 2) + \
a5 * input(i, j - 2, k + 2) + \
a5 * input(i, j + 2, k - 2) + \
a5 * input(i, j - 2, k - 2) + \
a6 * input(i + 1, j + 1, k + 1) + \
a6 * input(i - 1, j + 1, k + 1) + \
a6 * input(i + 1, j - 1, k + 1) + \
a6 * input(i - 1, j - 1, k + 1) + \
a6 * input(i + 1, j + 1, k - 1) + \
a6 * input(i - 1, j + 1, k - 1) + \
a6 * input(i + 1, j - 1, k - 1) + \
a6 * input(i - 1, j - 1, k - 1) + \
a7 * input(i + 1, j + 1, k + 2) + \
a7 * input(i - 1, j + 1, k + 2) + \
a7 * input(i + 1, j - 1, k + 2) + \
a7 * input(i - 1, j - 1, k + 2) + \
a7 * input(i + 1, j + 1, k - 2) + \
a7 * input(i - 1, j + 1, k - 2) + \
a7 * input(i + 1, j - 1, k - 2) + \
a7 * input(i - 1, j - 1, k - 2) + \
a7 * input(i + 1, j + 2, k + 1) + \
a7 * input(i - 1, j + 2, k + 1) + \
a7 * input(i + 1, j - 2, k + 1) + \
a7 * input(i - 1, j - 2, k + 1) + \
a7 * input(i + 1, j + 2, k - 1) + \
a7 * input(i - 1, j + 2, k - 1) + \
a7 * input(i + 1, j - 2, k - 1) + \
a7 * input(i - 1, j - 2, k - 1) + \
a7 * input(i + 2, j + 1, k + 1) + \
a7 * input(i - 2, j + 1, k + 1) + \
a7 * input(i + 2, j - 1, k + 1) + \
a7 * input(i - 2, j - 1, k + 1) + \
a7 * input(i + 2, j + 1, k - 1) + \
a7 * input(i - 2, j + 1, k - 1) + \
a7 * input(i + 2, j - 1, k - 1) + \
a7 * input(i - 2, j - 1, k - 1) + \
a8 * input(i + 2, j + 2, k + 1) + \
a8 * input(i - 2, j + 2, k + 1) + \
a8 * input(i + 2, j - 2, k + 1) + \
a8 * input(i - 2, j - 2, k + 1) + \
a8 * input(i + 2, j + 2, k - 1) + \
a8 * input(i - 2, j + 2, k - 1) + \
a8 * input(i + 2, j - 2, k - 1) + \
a8 * input(i - 2, j - 2, k - 1) + \
a8 * input(i + 2, j + 1, k + 2) + \
a8 * input(i - 2, j + 1, k + 2) + \
a8 * input(i + 2, j - 1, k + 2) + \
a8 * input(i - 2, j - 1, k + 2) + \
a8 * input(i + 2, j + 1, k - 2) + \
a8 * input(i - 2, j + 1, k - 2) + \
a8 * input(i + 2, j - 1, k - 2) + \
a8 * input(i - 2, j - 1, k - 2) + \
a8 * input(i + 1, j + 2, k + 2) + \
a8 * input(i - 1, j + 2, k + 2) + \
a8 * input(i + 1, j - 2, k + 2) + \
a8 * input(i - 1, j - 2, k + 2) + \
a8 * input(i + 1, j + 2, k - 2) + \
a8 * input(i - 1, j + 2, k - 2) + \
a8 * input(i + 1, j - 2, k - 2) + \
a8 * input(i - 1, j - 2, k - 2) + \
a9 * input(i + 2, j + 2, k + 2) + \
a9 * input(i - 2, j + 2, k + 2) + \
a9 * input(i + 2, j - 2, k + 2) + \
a9 * input(i - 2, j - 2, k + 2) + \
a9 * input(i + 2, j + 2, k - 2) + \
a9 * input(i - 2, j + 2, k - 2) + \
a9 * input(i + 2, j - 2, k - 2) + \
a9 * input(i - 2, j - 2, k - 2)
output(i, j, k).assign(calc)
STENCIL = [output]
| 35.636364
| 62
| 0.351361
| 1,082
| 5,880
| 1.900185
| 0.055453
| 0.364786
| 0.170233
| 0.194553
| 0.7607
| 0.75
| 0.737354
| 0.737354
| 0.737354
| 0.72714
| 0
| 0.148898
| 0.428912
| 5,880
| 164
| 63
| 35.853659
| 0.463371
| 0.056293
| 0
| 0
| 0
| 0
| 0.011748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013793
| 0
| 0.013793
| 0
| 0
| 0
| 1
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4b472aed6ceda241eb38580b1a0b490f1aebb390
| 6,482
|
py
|
Python
|
urls.py
|
margasiewicz/FootballTeam-Bots
|
5b639ac33eb41fe0ebf27ab21644f3bfaf162052
|
[
"Unlicense"
] | null | null | null |
urls.py
|
margasiewicz/FootballTeam-Bots
|
5b639ac33eb41fe0ebf27ab21644f3bfaf162052
|
[
"Unlicense"
] | null | null | null |
urls.py
|
margasiewicz/FootballTeam-Bots
|
5b639ac33eb41fe0ebf27ab21644f3bfaf162052
|
[
"Unlicense"
] | null | null | null |
skill_hover = {
"ofensywa": '//*[@id="container"]/div/div[2]/div[1]/div[2]/div/div[2]/div[2]',
"defensywa": '//*[@id="container"]/div/div[2]/div[1]/div[3]/div/div[2]/div[2]',
"rozgrywanie:": '//*[@id="container"]/div/div[2]/div[1]/div[4]/div/div[2]/div[2]',
"kondycja": '//*[@id="container"]/div/div[2]/div[1]/div[5]/div/div[2]/div[2]',
"czytanie": '//*[@id="container"]/div/div[2]/div[1]/div[6]/div/div[2]/div[2]',
"pressing": '//*[@id="container"]/div/div[2]/div[1]/div[7]/div/div[2]/div[2]',
"stale_fragmenty": '//*[@id="container"]/div/div[2]/div[1]/div[8]/div/div[2]/div[2]',
"skutecznosc": '//*[@id="container"]/div/div[2]/div[1]/div[9]/div/div[2]/div[2]'
}
skill_btns = {
"ofensywa": '//*[@id="container"]/div/div[2]/div[1]/div[2]/div/div[2]/button',
"defensywa": '//*[@id="container"]/div/div[2]/div[1]/div[3]/div/div[2]/button',
"rozgrywanie:": '//*[@id="container"]/div/div[2]/div[1]/div[4]/div/div[2]/button',
"kondycja": '//*[@id="container"]/div/div[2]/div[1]/div[5]/div/div[2]/button',
"czytanie": '//*[@id="container"]/div/div[2]/div[1]/div[6]/div/div[2]/button',
"pressing": '//*[@id="container"]/div/div[2]/div[1]/div[7]/div/div[2]/button',
"stale_fragmenty": '//*[@id="container"]/div/div[2]/div[1]/div[8]/div/div[2]/button',
"skutecznosc": '//*[@id="container"]/div/div[2]/div[1]/div[9]/div/div[2]/button',
}
spec_urls = {
"ofensywa": "https://game.footballteam.pl/training/specialization/offensive",
"defensywa": "https://game.footballteam.pl/training/specialization/defensive",
"rozgrywanie": "https://game.footballteam.pl/training/specialization/playmaking",
"kondycja": "https://game.footballteam.pl/training/specialization/condition",
"czytanie": "https://game.footballteam.pl/training/specialization/reading",
"pressing": "https://game.footballteam.pl/training/specialization/pressing",
"stale_fragmenty": "https://game.footballteam.pl/training/specialization/freekicks",
"skutecznosc": "https://game.footballteam.pl/training/specialization/efficacy"
}
spec_hover = {
"jeden": '//*[@id="container"]/div/div[2]/div/div[2]/div[2]/div[2]',
"dwa": '//*[@id="container"]/div/div[2]/div/div[3]/div[2]/div[2]',
"trzy": '//*[@id="container"]/div/div[2]/div/div[4]/div[2]/div[2]',
"cztery": '//*[@id="container"]/div/div[2]/div/div[5]/div[2]/div[2]'
}
spec_btns = {
"jeden": '//*[@id="container"]/div/div[2]/div/div[2]/div[2]/button',
"dwa": '//*[@id="container"]/div/div[2]/div/div[3]/div[2]/button',
"trzy": '//*[@id="container"]/div/div[2]/div/div[4]/div[2]/button',
"cztery": '//*[@id="container"]/div/div[2]/div/div[5]/div[2]/button'
}
other_btns = {
"first_login_btn": '//*[@id="buttons"]/div/button[1]',
"login_screen": '//*[@id="modal-login-tab"]/h5',
"put_login": '//*[@id="login-form"]/div[1]/input',
"put_pass": '//*[@id="login-form"]/div[2]/input',
"login_btn": '//*[@id="btn-login"]'
}
work_hover = {
1:'//*[@id="container"]/div/div/div/div[1]/div[2]',
2:'//*[@id="container"]/div/div/div/div[2]/div[2]',
3:'//*[@id="container"]/div/div/div/div[3]/div[2]',
4:'//*[@id="container"]/div/div/div/div[4]/div[2]',
5:'//*[@id="container"]/div/div/div/div[5]/div[2]',
6:'//*[@id="container"]/div/div/div/div[6]/div[2]',
7:'//*[@id="container"]/div/div/div/div[7]/div[2]',
8:'//*[@id="container"]/div/div/div/div[8]/div[2]',
9:'//*[@id="container"]/div/div/div/div[9]/div[2]'
}
work_click={
1:'//*[@id="container"]/div/div/div/div[1]/div[2]/div[3]/button',
2:'//*[@id="container"]/div/div/div/div[2]/div[2]/div[3]/button',
3:'//*[@id="container"]/div/div/div/div[3]/div[2]/div[3]/button',
4:'//*[@id="container"]/div/div/div/div[4]/div[2]/div[3]/button',
5:'//*[@id="container"]/div/div/div/div[5]/div[2]/div[3]/button',
6:'//*[@id="container"]/div/div/div/div[6]/div[2]/div[3]/button',
7:'//*[@id="container"]/div/div/div/div[7]/div[2]/div[3]/button',
8:'//*[@id="container"]/div/div/div/div[8]/div[2]/div[3]/button',
9:'//*[@id="container"]/div/div/div/div[9]/div[2]/div[3]/button'
}
work_sleep_time={
1:30,
2:150,
3:360,
4:300,
5:1800,
6:5400,
7:600,
8:7200,
9:21600
}
food_hover = {
1:'//*[@id="container"]/div/div/div[1]/div[1]/div[2]',
2:'//*[@id="container"]/div/div/div[1]/div[2]/div[2]',
3:'//*[@id="container"]/div/div/div[1]/div[3]/div[2]',
4:'//*[@id="container"]/div/div/div[1]/div[4]/div[2]',
5:'//*[@id="container"]/div/div/div[1]/div[5]/div[2]',
6:'//*[@id="container"]/div/div/div[1]/div[6]/div[2]',
}
food_click = {
1:'//*[@id="container"]/div/div/div[1]/div[1]/div[2]/div[3]/button',
2:'//*[@id="container"]/div/div/div[1]/div[2]/div[2]/div[3]/button',
3:'//*[@id="container"]/div/div/div[1]/div[3]/div[2]/div[3]/button',
4:'//*[@id="container"]/div/div/div[1]/div[4]/div[2]/div[3]/button',
5:'//*[@id="container"]/div/div/div[1]/div[5]/div[2]/div[3]/button',
6:'//*[@id="container"]/div/div/div[1]/div[6]/div[2]/div[3]/button',
}
food_sleep_time = {
1:360,
2:1800,
3:7200,
4:14400,
5:21600,
6:43200,
}
market_btns = {
'toggle_item_color':'//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/img',
'gold':'//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[3]/div',
'red': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[4]',
'green': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[5]/div',
'blue': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[6]/div',
'grey': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[7]/div',
'any': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[1]/div/div/div[2]/div/div/div[2]/div[1]/div',
'min-max': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[2]/div[1]/div/div[1]/div[4]/i[2]',
'max-min': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[2]/div[1]/div/div[1]/div[4]/i[1]',
'first_item': '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[2]/div[1]/div/div[2]/div[4]/b',
"buy": '//*[@id="container"]/div/div[2]/div[3]/div[2]/div/div[2]/div[1]/div/div[2]/div[5]/button',
"confirm": '//*[@id="container"]/div/div[2]/div[1]/div/div/div[3]/button[2]'
}
| 53.570248
| 113
| 0.568035
| 1,117
| 6,482
| 3.272158
| 0.080573
| 0.297127
| 0.193434
| 0.183311
| 0.827086
| 0.809302
| 0.705335
| 0.692476
| 0.685636
| 0.685636
| 0
| 0.063368
| 0.089479
| 6,482
| 121
| 114
| 53.570248
| 0.555913
| 0
| 0
| 0
| 0
| 0.475
| 0.799938
| 0.659263
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.008333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4b5a40319a139cb5307b19ea0b42c51275d71c68
| 151
|
py
|
Python
|
digsby/src/plugins/fbchat/fbchat_gui.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35
|
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
digsby/src/plugins/fbchat/fbchat_gui.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4
|
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
digsby/src/plugins/fbchat/fbchat_gui.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15
|
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
def construct_advanced_subpanel_im(panel, SP, MSP, MSC):
return True
def extract_advanced_subpanel_im(panel, info, SP, MSP, MSC):
return True
| 25.166667
| 60
| 0.754967
| 23
| 151
| 4.695652
| 0.565217
| 0.296296
| 0.333333
| 0.425926
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15894
| 151
| 5
| 61
| 30.2
| 0.850394
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
4b67d36563903a6fffdf1be44cb597271f295cd4
| 8,643
|
py
|
Python
|
tests/vm/test_machine_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | 1
|
2021-04-24T20:01:54.000Z
|
2021-04-24T20:01:54.000Z
|
tests/vm/test_machine_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | 23
|
2020-05-22T06:43:14.000Z
|
2021-02-25T21:02:28.000Z
|
tests/vm/test_machine_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import ANY, MagicMock, patch
from azure.mgmt.compute import ComputeManagementClient
import pdchaosazure
from pdchaosazure.vm.actions import (burn_io, delete, fill_disk,
network_latency, restart, stop,
stress_cpu)
from tests.data import config_provider, machine_provider, secrets_provider
MACHINE_ALPHA = {
'name': 'VirtualMachineAlpha',
'resourceGroup': 'group'}
MACHINE_BETA = {
'name': 'VirtualMachineBeta',
'resourceGroup': 'group'}
class AnyStringWith(str):
def __eq__(self, other):
return self in other
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_delete_one_machine(init, fetch):
client = MagicMock()
init.return_value = client
fetch.return_value = [MACHINE_ALPHA]
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup'"
delete(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_delete.call_count == 1
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_delete_two_machines(init, fetch):
client = MagicMock()
init.return_value = client
fetch.return_value = [MACHINE_ALPHA, MACHINE_BETA]
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup' | sample 2"
delete(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_delete.call_count == 2
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_stop_one_machine(init, fetch):
client = MagicMock()
init.return_value = client
machines = [MACHINE_ALPHA]
fetch.return_value = machines
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup'"
stop(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_power_off.call_count == 1
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_stop_two_machines(init, fetch):
client = MagicMock()
init.return_value = client
fetch.return_value = [MACHINE_ALPHA, MACHINE_BETA]
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup' | sample 2"
stop(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_power_off.call_count == 2
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_restart_one_machine(init, fetch):
client = MagicMock()
init.return_value = client
fetch.return_value = [MACHINE_ALPHA]
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup'"
restart(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_restart.call_count == 1
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
def test_restart_two_machines(init, fetch):
client = MagicMock()
init.return_value = client
fetch.return_value = [MACHINE_ALPHA, MACHINE_BETA]
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
f = "where resourceGroup=='myresourcegroup' | sample 2"
restart(f, configuration, secrets)
fetch.assert_called_with(f, configuration, secrets)
assert client.virtual_machines.begin_restart.call_count == 2
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
@patch.object(pdchaosazure.common.compute.command, 'run', autospec=True)
def test_stress_cpu(mocked_command_run, mocked_init_client, fetch):
# arrange mocks
machine = machine_provider.default()
fetch.return_value = [machine]
mocked_client = MagicMock(spec=ComputeManagementClient)
mocked_init_client.return_value = mocked_client
configuration = config_provider.provide_default_config()
secrets = secrets_provider.provide_secrets_via_service_principal()
duration = 60
# act
stress_cpu(
filter="where name=='some_linux_machine'", duration=duration, configuration=configuration, secrets=secrets)
# assert
fetch.assert_called_with("where name=='some_linux_machine'", configuration, secrets)
mocked_command_run.assert_called_with(machine['resourceGroup'], machine, parameters=ANY, client=mocked_client)
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
@patch.object(pdchaosazure.common.compute.command, 'prepare_path', autospec=True)
@patch.object(pdchaosazure.common.compute.command, 'run', autospec=True)
def test_fill_disk(mocked_command_run, mocked_command_prepare_path, mocked_init_client, fetch):
    """fill_disk fetches the filtered machines and executes the fill command."""
    # Given: a resolvable target path, one matching machine, mocked client.
    query = "where name=='some_linux_machine'"
    mocked_command_prepare_path.return_value = '/root/burn/hard'
    machine = machine_provider.default()
    fetch.return_value = [machine]
    compute_client = MagicMock(spec=ComputeManagementClient)
    mocked_init_client.return_value = compute_client
    configuration = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_via_service_principal()

    # When
    fill_disk(filter=query, duration=60, size=1000, path='/root/burn/hard',
              configuration=configuration, secrets=secrets)

    # Then
    fetch.assert_called_with(query, configuration, secrets)
    mocked_command_run.assert_called_with(
        machine['resourceGroup'], machine, parameters=ANY, client=compute_client)
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
@patch.object(pdchaosazure.common.compute.command, 'run', autospec=True)
def test_network_latency(mocked_command_run, mocked_init_client, fetch):
    """network_latency forwards the filter and runs the latency command per machine."""
    # Given
    query = "where name=='some_linux_machine'"
    machine = machine_provider.default()
    fetch.return_value = [machine]
    compute_client = MagicMock(spec=ComputeManagementClient)
    mocked_init_client.return_value = compute_client
    configuration = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_via_service_principal()

    # When: inject 200ms delay with 50ms jitter for one minute.
    network_latency(filter=query, duration=60, delay=200, jitter=50,
                    configuration=configuration, secrets=secrets)

    # Then
    fetch.assert_called_with(query, configuration, secrets)
    mocked_command_run.assert_called_with(
        machine['resourceGroup'], machine, parameters=ANY, client=compute_client)
@patch('pdchaosazure.vm.actions.fetch_machines', autospec=True)
@patch('pdchaosazure.vm.actions.client.init', autospec=True)
@patch.object(pdchaosazure.common.compute.command, 'run', autospec=True)
def test_burn_io(mocked_command_run, mocked_init_client, fetch):
    """burn_io fetches the filtered machines and executes the IO-stress command."""
    # Given
    query = "where name=='some_linux_machine'"
    machine = machine_provider.default()
    fetch.return_value = [machine]
    configuration = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_via_service_principal()
    compute_client = MagicMock(spec=ComputeManagementClient)
    mocked_init_client.return_value = compute_client

    # When
    burn_io(filter=query, duration=60, configuration=configuration, secrets=secrets)

    # Then
    fetch.assert_called_with(query, configuration, secrets)
    mocked_command_run.assert_called_with(
        machine['resourceGroup'], machine, parameters=ANY, client=compute_client)
| 36.468354
| 119
| 0.763855
| 1,009
| 8,643
| 6.263627
| 0.102081
| 0.047468
| 0.069778
| 0.082278
| 0.896361
| 0.896361
| 0.891139
| 0.891139
| 0.876266
| 0.876266
| 0
| 0.003475
| 0.134213
| 8,643
| 236
| 120
| 36.622881
| 0.841107
| 0.011454
| 0
| 0.745098
| 0
| 0
| 0.168093
| 0.132458
| 0
| 0
| 0
| 0
| 0.130719
| 1
| 0.071895
| false
| 0
| 0.03268
| 0.006536
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2995c3648f7190ada0c724bc9a40968641b32f3f
| 207
|
py
|
Python
|
officialWebsite/misc/admin.py
|
rdotjain/officialWebsite
|
d5f37cf840c303ecbc47aa12bd76781bb725422d
|
[
"MIT"
] | 1
|
2021-11-21T04:34:13.000Z
|
2021-11-21T04:34:13.000Z
|
officialWebsite/misc/admin.py
|
raghavTinker/officialWebsite
|
bf02249894b7d26b8b3f8062f2fc75556a52ae1e
|
[
"MIT"
] | null | null | null |
officialWebsite/misc/admin.py
|
raghavTinker/officialWebsite
|
bf02249894b7d26b8b3f8062f2fc75556a52ae1e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from . import models

# Expose the misc app's models in the Django admin with default ModelAdmins.
for _model in (models.Achievement, models.FAQ, models.ContactRequest, models.Sponsor):
    admin.site.register(_model)
| 23
| 42
| 0.826087
| 28
| 207
| 6.107143
| 0.428571
| 0.210526
| 0.397661
| 0.538012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067633
| 207
| 8
| 43
| 25.875
| 0.88601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
29dc1d23848492ae3acbecb27666c2f7540270bc
| 2,245
|
py
|
Python
|
curation_projects/kannada_audio/dvg_jnaapaka/__main__.py
|
sanskrit-coders/audio_curation
|
363cfdab94cac0bc72096efa4649d0e075f55f51
|
[
"MIT"
] | null | null | null |
curation_projects/kannada_audio/dvg_jnaapaka/__main__.py
|
sanskrit-coders/audio_curation
|
363cfdab94cac0bc72096efa4649d0e075f55f51
|
[
"MIT"
] | 1
|
2018-10-06T20:36:13.000Z
|
2018-10-06T20:36:13.000Z
|
curation_projects/kannada_audio/dvg_jnaapaka/__main__.py
|
sanskrit-coders/audio_curation
|
363cfdab94cac0bc72096efa4649d0e075f55f51
|
[
"MIT"
] | null | null | null |
# One-off curation script: uploads each volume of DVG's "jnApaka chitra shAle"
# (ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ) Kannada audio series, one upload_volume call per volume.
# NOTE(review): repo paths are hard-coded to one workstation
# (/home/vvasuki/...) — presumably only runnable there; confirm before reuse.
from curation_projects.kannada_audio import dvg_jnaapaka
# The halavAru-sArvajanikaru volume is kept commented out — apparently already
# uploaded in an earlier run.
# dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಹಲವಾರು-ಸಾರ್ವಜನಿಕರು jnApaka-chitra-shAle halavAru-sArvajanikaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-halavaru-sArvajanikaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಹೃದಯಸಮ್ಪನ್ನರು dvg-jnapaka-chitra-shaale-hRdaya-sampannaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-hRdaya-sampannaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಕಲೋಪಾಸಕರು dvg-jnapaka-chitra-shaale-kalopAsakaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-kalopAsakaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಮೈಸೂರಿನ ದಿವಾನರು dvg-jnapaka-chitra-shaale-maisUrina-dIvAnaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-maisUrina-dIvAnaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಸಾಹಿತಿ-ಸಜ್ಜನ-ಸಾರ್ವಜನಿಕರು dvg-jnapaka-chitra-shaale-sAhiti-sajjana-sArvajanikaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-sAhiti-sajjana-sArvajanikaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಸಾಹಿತ್ಯೋಪಾಸಕರು dvg-jnapaka-chitra-shaale-sAhityopAsakaru", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-sAhityopAsakaru"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಸಂಕೀರ್ಣ ಸ್ಮೃತಿ ಸಮ್ಪುಟ dvg-jnapaka-chitra-shaale-sankIrNa-smRti-sampuTa", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-sankIrNa-smRti-sampuTa"], dry_run=False)
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ವೈದಿಕ-ಧರ್ಮ-ಸಮ್ಪ್ರದಾಯಸ್ಥರು dvg-jnapaka-chitra-shaale-vaidika-dharma-sampradAyastharu", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "dvg-jnapaka-chitra-shaale-vaidika-dharma-sampradAyastharu"], dry_run=False)
# The last volume additionally carries a narrator credit in `description`.
dvg_jnaapaka.upload_volume(title = "ಜ್ಞಾಪಕಚಿತ್ರಶಾಲೆ ಬ್ರಹ್ಮಪುರಿಯ ಭಿಕ್ಷುಕ dvg-gaNesh-brahmapuriya-bhixuka", repo_paths=["/home/vvasuki/kannada-audio/dvg-jnApaka/" + "brahmapuriya-bhixuka"], dry_run=False, description = "ರಾ ಗಣೇಶ R Ganesh")
| 140.3125
| 271
| 0.766147
| 491
| 2,245
| 3.651731
| 0.193483
| 0.133854
| 0.133854
| 0.184049
| 0.854992
| 0.824317
| 0.759621
| 0.653653
| 0.617959
| 0.585611
| 0
| 0
| 0.045434
| 2,245
| 15
| 272
| 149.666667
| 0.784414
| 0.10735
| 0
| 0
| 0
| 0.888889
| 0.656172
| 0.524738
| 0.888889
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4b1b471b666a471f6af57df1472da56413021991
| 1,124
|
py
|
Python
|
tests/test_16.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_16.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_16.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 16. 3Sum Closest
"""
@pytest.fixture(scope="session")
def init_variables_16():
    """Session-scoped fixture yielding a factory for a shared Solution instance."""
    from src.leetcode_16_3sum_closest import Solution

    shared_solution = Solution()

    def _factory():
        # Every caller receives the same Solution built once per session.
        return shared_solution

    yield _factory
class TestClass16:
def test_solution_0(self, init_variables_16):
assert init_variables_16().threeSumClosest([-1, 2, 1, -4], 1) == 2
def test_solution_1(self, init_variables_16):
assert init_variables_16().threeSumClosest([0, 0, 0], 1) == 0
#!/usr/bin/env python
import pytest
"""
Test 16. 3Sum Closest
"""
@pytest.fixture(scope="session")
def init_variables_16():
    """Session-scoped fixture yielding a factory for a shared Solution instance."""
    from src.leetcode_16_3sum_closest import Solution

    shared_solution = Solution()

    def _factory():
        # Every caller receives the same Solution built once per session.
        return shared_solution

    yield _factory
class TestClass16:
def test_solution_0(self, init_variables_16):
assert init_variables_16().threeSumClosest([-1, 2, 1, -4], 1) == 2
def test_solution_1(self, init_variables_16):
assert init_variables_16().threeSumClosest([0, 0, 0], 1) == 0
| 19.719298
| 74
| 0.692171
| 154
| 1,124
| 4.753247
| 0.201299
| 0.248634
| 0.286885
| 0.098361
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.077093
| 0.192171
| 1,124
| 56
| 75
| 20.071429
| 0.729075
| 0.035587
| 0
| 1
| 0
| 0
| 0.013672
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.307692
| false
| 0
| 0.153846
| 0.076923
| 0.615385
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 11
|
4b1c309c79cea422cb42796d5ed450a4c2a749c5
| 79
|
py
|
Python
|
IA/Python/1/1.1/2.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | 1
|
2022-02-23T12:47:00.000Z
|
2022-02-23T12:47:00.000Z
|
IA/Python/1/1.1/2.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | null | null | null |
IA/Python/1/1.1/2.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | null | null | null |
a=2
b=3
print(a==b)
print(a!=b)
print(a>b)
print(a<b)
print(a>=b)
print(a<=b)
| 8.777778
| 12
| 0.582278
| 22
| 79
| 2.090909
| 0.227273
| 0.782609
| 0.913043
| 1.304348
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0
| 0.028571
| 0.113924
| 79
| 8
| 13
| 9.875
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 12
|
d9b0ce5ee4f0d710978e4eb051209e148c2c1c89
| 85,318
|
py
|
Python
|
pandadoc_client/api/documents_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 27
|
2021-11-16T11:30:13.000Z
|
2022-03-17T08:56:18.000Z
|
pandadoc_client/api/documents_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | null | null | null |
pandadoc_client/api/documents_api.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 2
|
2021-12-16T13:38:15.000Z
|
2022-01-09T00:38:00.000Z
|
"""
PandaDoc Public API
PandaDoc Public API documentation # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pandadoc_client.api_client import ApiClient, Endpoint as _Endpoint
from pandadoc_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from pandadoc_client.model.document_create_link_request import DocumentCreateLinkRequest
from pandadoc_client.model.document_create_link_response import DocumentCreateLinkResponse
from pandadoc_client.model.document_create_request import DocumentCreateRequest
from pandadoc_client.model.document_create_response import DocumentCreateResponse
from pandadoc_client.model.document_details_response import DocumentDetailsResponse
from pandadoc_client.model.document_list_response import DocumentListResponse
from pandadoc_client.model.document_ordering_fields_enum import DocumentOrderingFieldsEnum
from pandadoc_client.model.document_send_request import DocumentSendRequest
from pandadoc_client.model.document_send_response import DocumentSendResponse
from pandadoc_client.model.document_status_change_request import DocumentStatusChangeRequest
from pandadoc_client.model.document_status_request_enum import DocumentStatusRequestEnum
from pandadoc_client.model.document_status_response import DocumentStatusResponse
from pandadoc_client.model.document_transfer_all_ownership_request import DocumentTransferAllOwnershipRequest
from pandadoc_client.model.document_transfer_ownership_request import DocumentTransferOwnershipRequest
from pandadoc_client.model.linked_object_create_request import LinkedObjectCreateRequest
from pandadoc_client.model.linked_object_create_response import LinkedObjectCreateResponse
from pandadoc_client.model.linked_object_list_response import LinkedObjectListResponse
class DocumentsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Configure one `_Endpoint` per public documents operation.

    Args:
        api_client (ApiClient): configured API client; a default
            ``ApiClient()`` is created when ``None`` is passed.

    NOTE: this replaces the generator's fully expanded boilerplate with a
    data-driven builder. Every endpoint shares auth ('apiKey'/'oauth2'),
    empty nullable/enum lists, and attribute/location maps derivable from
    the parameter locations, so only the per-endpoint facts are spelled
    out below. The specs must stay in sync with the OpenAPI document.
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client

    def _endpoint(path, operation_id, http_method, response_type, types,
                  required=(), locations=None, accept=('application/json',),
                  content=(), validation=(), validations=None,
                  collection_formats=None):
        # Build an _Endpoint from the facts that actually vary.
        # `types` is an ordered mapping {param_name: openapi_type_tuple};
        # its key order defines the 'all' list. `locations` maps a param
        # to 'path'/'body'; anything unlisted defaults to 'query'.
        locations = dict(locations or {})
        return _Endpoint(
            settings={
                'response_type': response_type,
                'auth': ['apiKey', 'oauth2'],
                'endpoint_path': path,
                'operation_id': operation_id,
                'http_method': http_method,
                'servers': None,
            },
            params_map={
                'all': list(types),
                'required': list(required),
                'nullable': [],
                'enum': [],
                'validation': list(validation),
            },
            root_map={
                'validations': dict(validations or {}),
                'allowed_values': {},
                'openapi_types': dict(types),
                # Body parameters carry no wire attribute name; path and
                # query parameters map onto themselves.
                'attribute_map': {
                    name: name for name in types
                    if locations.get(name, 'query') != 'body'
                },
                'location_map': {
                    name: locations.get(name, 'query') for name in types
                },
                'collection_format_map': dict(collection_formats or {}),
            },
            headers_map={
                'accept': list(accept),
                'content_type': list(content),
            },
            api_client=api_client,
        )

    # Shorthand for the very common single path-parameter case.
    _ID_PATH = {'id': 'path'}

    self.change_document_status_endpoint = _endpoint(
        '/public/v1/documents/{id}/status', 'change_document_status', 'PATCH',
        None,
        {'id': (str,),
         'document_status_change_request': (DocumentStatusChangeRequest,)},
        required=('id', 'document_status_change_request'),
        locations={'id': 'path', 'document_status_change_request': 'body'},
        content=('application/json', 'multipart/form-data'),
    )
    self.create_document_endpoint = _endpoint(
        '/public/v1/documents', 'create_document', 'POST',
        (DocumentCreateResponse,),
        {'document_create_request': (DocumentCreateRequest,),
         'editor_ver': (str,)},
        required=('document_create_request',),
        locations={'document_create_request': 'body'},
        content=('application/json', 'multipart/form-data'),
    )
    self.create_document_link_endpoint = _endpoint(
        '/public/v1/documents/{id}/session', 'create_document_link', 'POST',
        (DocumentCreateLinkResponse,),
        {'id': (str,),
         'document_create_link_request': (DocumentCreateLinkRequest,)},
        required=('id', 'document_create_link_request'),
        locations={'id': 'path', 'document_create_link_request': 'body'},
        content=('application/json',),
    )
    self.create_linked_object_endpoint = _endpoint(
        '/public/v1/documents/{id}/linked-objects', 'create_linked_object',
        'POST',
        (LinkedObjectCreateResponse,),
        {'id': (str,),
         'linked_object_create_request': (LinkedObjectCreateRequest,)},
        required=('id', 'linked_object_create_request'),
        locations={'id': 'path', 'linked_object_create_request': 'body'},
        content=('application/json',),
    )
    self.delete_document_endpoint = _endpoint(
        '/public/v1/documents/{id}', 'delete_document', 'DELETE',
        None,
        {'id': (str,)},
        required=('id',),
        locations=_ID_PATH,
    )
    self.delete_linked_object_endpoint = _endpoint(
        '/public/v1/documents/{id}/linked-objects/{linked_object_id}',
        'delete_linked_object', 'DELETE',
        None,
        {'id': (str,), 'linked_object_id': (str,)},
        required=('id', 'linked_object_id'),
        locations={'id': 'path', 'linked_object_id': 'path'},
    )
    self.details_document_endpoint = _endpoint(
        '/public/v1/documents/{id}/details', 'details_document', 'GET',
        (DocumentDetailsResponse,),
        {'id': (str,)},
        required=('id',),
        locations=_ID_PATH,
    )
    self.download_document_endpoint = _endpoint(
        '/public/v1/documents/{id}/download', 'download_document', 'GET',
        (file_type,),
        {'id': (str,),
         'watermark_color': (str,),
         'watermark_font_size': (int,),
         'watermark_opacity': (float,),
         'watermark_text': (str,),
         'separate_files': (bool,)},
        required=('id',),
        locations=_ID_PATH,
        accept=('application/pdf', 'application/json'),
    )
    self.download_protected_document_endpoint = _endpoint(
        '/public/v1/documents/{id}/download-protected',
        'download_protected_document', 'GET',
        (file_type,),
        {'id': (str,), 'separate_files': (bool,)},
        required=('id',),
        locations=_ID_PATH,
        accept=('application/pdf', 'application/json'),
    )
    self.list_documents_endpoint = _endpoint(
        '/public/v1/documents', 'list_documents', 'GET',
        (DocumentListResponse,),
        # All filters are optional query parameters.
        {'completed_from': (str,),
         'completed_to': (str,),
         'contact_id': (str,),
         'count': (int,),
         'created_from': (str,),
         'created_to': (str,),
         'deleted': (bool,),
         'id': (str,),
         'folder_uuid': (str,),
         'form_id': (str,),
         'membership_id': (str,),
         'metadata': ([str],),
         'modified_from': (str,),
         'modified_to': (str,),
         'order_by': (DocumentOrderingFieldsEnum,),
         'page': (int,),
         'q': (str,),
         'status': (DocumentStatusRequestEnum,),
         'status__ne': (DocumentStatusRequestEnum,),
         'tag': (str,),
         'template_id': (str,)},
        validation=('page',),
        validations={('page',): {'inclusive_minimum': 1}},
        collection_formats={'metadata': 'multi'},
    )
    self.list_linked_objects_endpoint = _endpoint(
        '/public/v1/documents/{id}/linked-objects', 'list_linked_objects',
        'GET',
        (LinkedObjectListResponse,),
        {'id': (str,)},
        required=('id',),
        locations=_ID_PATH,
    )
    self.send_document_endpoint = _endpoint(
        '/public/v1/documents/{id}/send', 'send_document', 'POST',
        (DocumentSendResponse,),
        {'id': (str,), 'document_send_request': (DocumentSendRequest,)},
        required=('id', 'document_send_request'),
        locations={'id': 'path', 'document_send_request': 'body'},
        content=('application/json',),
    )
    self.status_document_endpoint = _endpoint(
        '/public/v1/documents/{id}', 'status_document', 'GET',
        (DocumentStatusResponse,),
        {'id': (str,)},
        required=('id',),
        locations=_ID_PATH,
    )
    self.transfer_all_documents_ownership_endpoint = _endpoint(
        '/public/v1/documents/ownership', 'transfer_all_documents_ownership',
        'PATCH',
        None,
        {'document_transfer_all_ownership_request':
             (DocumentTransferAllOwnershipRequest,)},
        required=('document_transfer_all_ownership_request',),
        locations={'document_transfer_all_ownership_request': 'body'},
        content=('application/json',),
    )
    self.transfer_document_ownership_endpoint = _endpoint(
        '/public/v1/documents/{id}/ownership', 'transfer_document_ownership',
        'PATCH',
        None,
        {'id': (str,),
         'document_transfer_ownership_request':
             (DocumentTransferOwnershipRequest,)},
        required=('id', 'document_transfer_ownership_request'),
        locations={'id': 'path', 'document_transfer_ownership_request': 'body'},
        content=('application/json',),
    )
def change_document_status(
self,
id,
document_status_change_request,
**kwargs
):
"""Document status change # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_document_status(id, document_status_change_request, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
document_status_change_request (DocumentStatusChangeRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['document_status_change_request'] = \
document_status_change_request
return self.change_document_status_endpoint.call_with_http_info(**kwargs)
def create_document(
self,
document_create_request,
**kwargs
):
"""Create document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_document(document_create_request, async_req=True)
>>> result = thread.get()
Args:
document_create_request (DocumentCreateRequest): Use a PandaDoc template or an existing PDF to create a document. See the creation request examples [by template](/schemas/DocumentCreateByTemplateRequest) and [by pdf](/schemas/DocumentCreateByPdfRequest)
Keyword Args:
editor_ver (str): Set this parameter as `ev1` if you want to create a document from PDF with Classic Editor when both editors are enabled for the workspace.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentCreateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['document_create_request'] = \
document_create_request
return self.create_document_endpoint.call_with_http_info(**kwargs)
def create_document_link(
self,
id,
document_create_link_request,
**kwargs
):
"""Create a Document Link # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_document_link(id, document_create_link_request, async_req=True)
>>> result = thread.get()
Args:
id (str): Document ID
document_create_link_request (DocumentCreateLinkRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentCreateLinkResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['document_create_link_request'] = \
document_create_link_request
return self.create_document_link_endpoint.call_with_http_info(**kwargs)
def create_linked_object(
self,
id,
linked_object_create_request,
**kwargs
):
"""Create Linked Object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_linked_object(id, linked_object_create_request, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
linked_object_create_request (LinkedObjectCreateRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LinkedObjectCreateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['linked_object_create_request'] = \
linked_object_create_request
return self.create_linked_object_endpoint.call_with_http_info(**kwargs)
def delete_document(
self,
id,
**kwargs
):
"""Delete document by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_document(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document ID
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.delete_document_endpoint.call_with_http_info(**kwargs)
def delete_linked_object(
self,
id,
linked_object_id,
**kwargs
):
"""Delete Linked Object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_linked_object(id, linked_object_id, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
linked_object_id (str): Specify linked object ID.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['linked_object_id'] = \
linked_object_id
return self.delete_linked_object_endpoint.call_with_http_info(**kwargs)
def details_document(
self,
id,
**kwargs
):
"""Document details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.details_document(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Document ID
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentDetailsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.details_document_endpoint.call_with_http_info(**kwargs)
def download_document(
self,
id,
**kwargs
):
"""Document download # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_document(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
Keyword Args:
watermark_color (str): HEX code (for example `#FF5733`).. [optional]
watermark_font_size (int): Font size of the watermark.. [optional]
watermark_opacity (float): In range 0.0-1.0. [optional]
watermark_text (str): Specify watermark text.. [optional]
separate_files (bool): Set as `true` if you want to receive a zip file with all documents in separate when document transaction contains more than 1.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.download_document_endpoint.call_with_http_info(**kwargs)
def download_protected_document(
self,
id,
**kwargs
):
"""Download document protected # noqa: E501
Download a signed PDF of a completed document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_protected_document(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
Keyword Args:
separate_files (bool): Set as `true` if you want to receive a zip file with all documents in separate when document transaction contains more than 1.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.download_protected_document_endpoint.call_with_http_info(**kwargs)
def list_documents(
self,
**kwargs
):
"""List documents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_documents(async_req=True)
>>> result = thread.get()
Keyword Args:
completed_from (str): Return results where the `date_completed` field (ISO 8601) is greater than or equal to this value.. [optional]
completed_to (str): Return results where the `date_completed` field (ISO 8601) is less than or equal to this value.. [optional]
contact_id (str): Returns results where 'contact_id' is present in document as recipient or approver. [optional]
count (int): Specify how many document results to return. Default is 50 documents, maximum is 100 documents.. [optional]
created_from (str): Return results where the `date_created` field (ISO 8601) is greater than or equal to this value.. [optional]
created_to (str): Return results where the `date_created` field (ISO 8601) is less than this value.. [optional]
deleted (bool): Returns only the deleted documents.. [optional]
id (str): [optional]
folder_uuid (str): The UUID of the folder where the documents are stored.. [optional]
form_id (str): Specify the form used for documents creation. This parameter can't be used with template_id.. [optional]
membership_id (str): Returns results where 'membership_id' is present in document as owner (should be member uuid). [optional]
metadata ([str]): Specify metadata to filter by in the format of `metadata_{metadata-key}={metadata-value}` such as `metadata_opportunity_id=2181432`. The `metadata_` prefix is always required.. [optional]
modified_from (str): Return results where the `date_modified` field (iso-8601) is greater than or equal to this value.. [optional]
modified_to (str): Return results where the `date_modified` field (iso-8601) is less than this value.. [optional]
order_by (DocumentOrderingFieldsEnum): Specify the order of documents to return. Use `value` (for example, `date_created`) for ASC and `-value` (for example, `-date_created`) for DESC.. [optional]
page (int): Specify which page of the dataset to return.. [optional]
q (str): Search query. Filter by document reference number (this token is stored on the template level) or name.. [optional]
status (DocumentStatusRequestEnum): Specify the status of documents to return. * 0: document.draft * 1: document.sent * 2: document.completed * 3: document.uploaded * 4: document.error * 5: document.viewed * 6: document.waiting_approval * 7: document.approved * 8: document.rejected * 9: document.waiting_pay * 10: document.paid * 11: document.voided * 12: document.declined * 13: document.external_review . [optional]
status__ne (DocumentStatusRequestEnum): Specify the status of documents to return (exclude). * 0: document.draft * 1: document.sent * 2: document.completed * 3: document.uploaded * 4: document.error * 5: document.viewed * 6: document.waiting_approval * 7: document.approved * 8: document.rejected * 9: document.waiting_pay * 10: document.paid * 11: document.voided * 12: document.declined * 13: document.external_review . [optional]
tag (str): Search tag. Filter by document tag.. [optional]
template_id (str): Specify the template used for documents creation. Parameter can't be used with form_id.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentListResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.list_documents_endpoint.call_with_http_info(**kwargs)
def list_linked_objects(
self,
id,
**kwargs
):
"""List Linked Objects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_linked_objects(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LinkedObjectListResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.list_linked_objects_endpoint.call_with_http_info(**kwargs)
def send_document(
self,
id,
document_send_request,
**kwargs
):
"""Send Document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.send_document(id, document_send_request, async_req=True)
>>> result = thread.get()
Args:
id (str): Document ID
document_send_request (DocumentSendRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentSendResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['document_send_request'] = \
document_send_request
return self.send_document_endpoint.call_with_http_info(**kwargs)
def status_document(
self,
id,
**kwargs
):
"""Document status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.status_document(id, async_req=True)
>>> result = thread.get()
Args:
id (str): Specify document ID.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DocumentStatusResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.status_document_endpoint.call_with_http_info(**kwargs)
def transfer_all_documents_ownership(
self,
document_transfer_all_ownership_request,
**kwargs
):
"""Transfer all documents ownership # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.transfer_all_documents_ownership(document_transfer_all_ownership_request, async_req=True)
>>> result = thread.get()
Args:
document_transfer_all_ownership_request (DocumentTransferAllOwnershipRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['document_transfer_all_ownership_request'] = \
document_transfer_all_ownership_request
return self.transfer_all_documents_ownership_endpoint.call_with_http_info(**kwargs)
def transfer_document_ownership(
    self,
    id,
    document_transfer_ownership_request,
    **kwargs
):
    """Update document ownership  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.transfer_document_ownership(id, document_transfer_ownership_request, async_req=True)
    >>> result = thread.get()

    Args:
        id (str): Specify document ID.
        document_transfer_ownership_request (DocumentTransferOwnershipRequest):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request.
            One number is a total timeout; a (connect, read) tuple sets both.
            Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _spec_property_naming (bool): True if input variable names are the
            serialized (OpenAPI) names; False for pythonic snake_case names
            (default).
        _content_type (str/None): force body content-type. Default is None,
            letting it be predicted from allowed content-types and body.
        _host_index (int/None): index of the server to use. Default is read
            from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        None
        If the method is called asynchronously, returns the request thread.
    """
    # Fill in every framework option that the caller did not supply
    # explicitly; the endpoint object consumes these from kwargs.
    option_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_spec_property_naming', False),
        ('_content_type', None),
        ('_host_index', None),
    )
    for option, default in option_defaults:
        kwargs[option] = kwargs.get(option, default)
    # Positional parameters are forwarded through kwargs as well.
    kwargs['id'] = id
    kwargs['document_transfer_ownership_request'] = document_transfer_ownership_request
    return self.transfer_document_ownership_endpoint.call_with_http_info(**kwargs)
| 38.379667
| 472
| 0.503329
| 7,854
| 85,318
| 5.22358
| 0.044181
| 0.029615
| 0.019012
| 0.019744
| 0.85068
| 0.81307
| 0.783503
| 0.773851
| 0.75713
| 0.733632
| 0
| 0.003776
| 0.413371
| 85,318
| 2,222
| 473
| 38.39694
| 0.815924
| 0.377165
| 0
| 0.625837
| 1
| 0
| 0.241387
| 0.066267
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01071
| false
| 0
| 0.014056
| 0
| 0.035475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9f65c1445c27e715f07124473fcb9fa94dcec8c
| 4,453
|
py
|
Python
|
Python/Tests/TestShuntingYard.py
|
JonathanScala/Algorithms-and-Data-Structures
|
6d5ffe3d21607d59dc940aafc18c335a5b628ff5
|
[
"MIT"
] | null | null | null |
Python/Tests/TestShuntingYard.py
|
JonathanScala/Algorithms-and-Data-Structures
|
6d5ffe3d21607d59dc940aafc18c335a5b628ff5
|
[
"MIT"
] | null | null | null |
Python/Tests/TestShuntingYard.py
|
JonathanScala/Algorithms-and-Data-Structures
|
6d5ffe3d21607d59dc940aafc18c335a5b628ff5
|
[
"MIT"
] | null | null | null |
import unittest
import sys
import os
class TestConvertMethod(unittest.TestCase):
    """Tests for PostFix.convert: infix -> postfix (shunting-yard)."""

    def test_basic_addition(self):
        result = PostFix.convert('x+y')
        self.assertEqual(result, 'xy+', "Cannot add two variables")

    def test_basic_subtraction(self):
        result = PostFix.convert('x-y')
        self.assertEqual(result, 'xy-', "Cannot subtract two variables")

    def test_basic_multiplication(self):
        result = PostFix.convert('x*y')
        self.assertEqual(result, 'xy*', "Cannot Multiply two variables")

    def test_basic_division(self):
        # BUG FIX: this was a second `test_basic_multiplication`, which
        # shadowed the real multiplication test so only one of the two
        # ever ran; renamed (and message corrected) so both execute.
        result = PostFix.convert('x/y')
        self.assertEqual(result, 'xy/', "Cannot Divide two variables")

    def test_division_and_subtraction(self):
        result = PostFix.convert('(x)/(y-z)')
        self.assertEqual(result, 'xyz-/',
                         "Cannot Divide and Subtract Simultaneously")

    def test_division_and_Multiplication(self):
        result = PostFix.convert('(x)/(y*z)')
        self.assertEqual(result, 'xyz*/',
                         "Cannot Divide and Multiply Simultaneously")

    def test_division_and_Addition(self):
        result = PostFix.convert('(x)/(y+z)')
        self.assertEqual(result, 'xyz+/',
                         "Cannot Divide and Add Simultaneously")

    def test_Multiplication_and_Subtraction(self):
        result = PostFix.convert('x*(y-z)')
        self.assertEqual(result, 'xyz-*',
                         "Cannot Multiply and Subtract Simultaneously")

    def test_Multiplication_and_Addition(self):
        result = PostFix.convert('x*(y+z)')
        self.assertEqual(result, 'xyz+*',
                         "Cannot Multiply and Add Simultaneously")

    def test_Addition_and_Subtraction(self):
        result = PostFix.convert('x+(y-z)')
        self.assertEqual(result, 'xyz-+',
                         "Cannot Add and Subtract Simultaneously")

    def test_all_operators(self):
        result = PostFix.convert('((q+x)*(y-z))/r')
        self.assertEqual(result, 'qx+yz-*r/',
                         "Cannot Convert with all Operators Simultaneously")
class TestEvaluateMethod(unittest.TestCase):
    """Tests for PostFix.evaluate: numeric evaluation of postfix strings."""

    def test_basic_addition(self):
        result = PostFix.evaluate('1 2 +')
        self.assertEqual(result, 3.0, 'Cannot Add two Integers')

    def test_basic_subtraction(self):
        result = PostFix.evaluate('2 4 -')
        self.assertEqual(result, -2.0, "Cannot subtract two Integers")

    def test_basic_multiplication(self):
        result = PostFix.evaluate('3 4 *')
        self.assertEqual(result, 12.0, "Cannot Multiply two Integers")

    def test_basic_division(self):
        # BUG FIX: this was a second `test_basic_multiplication`, which
        # shadowed the real multiplication test so only one of the two
        # ever ran; renamed (and message corrected) so both execute.
        result = PostFix.evaluate('4 2 /')
        self.assertEqual(result, 2.0, "Cannot Divide two Integers")

    def test_division_and_subtraction(self):
        result = PostFix.evaluate('1 2 1 - /')
        self.assertEqual(result, 1.0,
                         "Cannot Divide and Subtract Simultaneously")

    def test_division_and_Multiplication(self):
        result = PostFix.evaluate('2 2 2 * /')
        self.assertEqual(result, 0.5,
                         "Cannot Divide and Multiply Simultaneously")

    def test_division_and_Addition(self):
        result = PostFix.evaluate('1 2 1 + /')
        self.assertEqual(result, float(1/3),
                         "Cannot Divide and Add Simultaneously")

    def test_Multiplication_and_Subtraction(self):
        result = PostFix.evaluate('1 - 2 6 - *')
        self.assertEqual(result, 4.0,
                         "Cannot Multiply and Subtract Simultaneously")

    def test_Multiplication_and_Addition(self):
        result = PostFix.evaluate('5 4 1 + *')
        self.assertEqual(result, 25.0,
                         "Cannot Multiply and Add Simultaneously")

    def test_Addition_and_Subtraction(self):
        result = PostFix.evaluate('1 - 4 3 - +')
        self.assertEqual(result, 0.0,
                         "Cannot Add and Subtract Simultaneously")

    def test_all_operators(self):
        result = PostFix.evaluate('1 2 + 4 3 - * 2 /')
        self.assertEqual(result, 1.5,
                         "Cannot Compute with all Operators Simultaneously")
if(__name__ == "__main__"):
    # The import is deferred until runtime so the Algorithms directory can be
    # added to sys.path first. The test methods look up PostFix in module
    # globals at call time, so binding it here (before unittest.main runs the
    # tests) is sufficient.
    sys.path.append(os.path.abspath('../Algorithms'))
    from shuntingYard import PostFix
    unittest.main(exit=False)
| 37.737288
| 77
| 0.602291
| 487
| 4,453
| 5.37577
| 0.13347
| 0.058824
| 0.142857
| 0.10084
| 0.821238
| 0.79259
| 0.766234
| 0.719251
| 0.701299
| 0.667303
| 0
| 0.017252
| 0.284078
| 4,453
| 117
| 78
| 38.059829
| 0.803952
| 0
| 0
| 0.393258
| 0
| 0
| 0.23524
| 0
| 0
| 0
| 0
| 0
| 0.247191
| 1
| 0.247191
| false
| 0
| 0.044944
| 0
| 0.314607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9fbd70edde2969b0b86a3b1cba8a3b23b031178
| 2,345
|
py
|
Python
|
NN_models/old.py
|
ThomasMrY/ActivationFunctionDemo
|
1f75c7964e354e7593e1fe54932877feb2961488
|
[
"MIT"
] | 12
|
2018-10-29T02:07:43.000Z
|
2022-03-18T09:56:34.000Z
|
NN_models/old.py
|
ThomasMrY/Activation_function_demo
|
1f75c7964e354e7593e1fe54932877feb2961488
|
[
"MIT"
] | null | null | null |
NN_models/old.py
|
ThomasMrY/Activation_function_demo
|
1f75c7964e354e7593e1fe54932877feb2961488
|
[
"MIT"
] | 6
|
2019-05-31T08:54:39.000Z
|
2021-08-15T03:09:33.000Z
|
import pickle
import torch
import numpy as np
def tanh_apx(input_d, file_name):
    """Piecewise-constant approximation of tanh over a tensor.

    Loads breakpoints (``x_linspace``) and per-segment values (``values``)
    from a pickle file and maps each element of ``input_d`` to the value of
    the segment it falls in: -1 below the first breakpoint, +1 at/above the
    last, and ``values[t-1]`` between consecutive interior breakpoints.

    Args:
        input_d (torch.Tensor): input values to approximate.
        file_name (str): basename of the pickle under ``./process_data``.
            NOTE(review): the path literally embeds a backslash separator
            (Windows-style); on POSIX this is one odd filename — confirm
            intended platform before changing.

    Returns:
        torch.Tensor: same shape as ``input_d``, piecewise-constant output.
    """
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if unpickling fails.
    with open('./process_data\\' + file_name + '.pkl', 'rb') as file:
        x_linspace = pickle.load(file)   # breakpoints, ascending
        values = pickle.load(file)       # value for each interior segment
    num = len(values)
    output = torch.zeros_like(input_d)
    # Right tail: inputs >= last breakpoint contribute +1. Already-handled
    # elements are pushed out of range by adding 10 so later masks skip them.
    temp_1 = x_linspace[-1]
    output_1 = torch.where(torch.lt(input_d, temp_1), torch.zeros_like(input_d), torch.ones_like(input_d))
    alpha = 10 * output_1
    input_d = torch.add(input_d, alpha)
    output = torch.add(output, output_1)
    # Left tail: inputs < first breakpoint contribute -1.
    temp_1 = x_linspace[0]
    output_1 = torch.where(torch.lt(input_d, temp_1), torch.ones_like(input_d), torch.zeros_like(input_d))
    alpha = 10 * output_1
    input_d = torch.add(input_d, alpha)
    output = torch.add(output, -1 * output_1)
    # Interior segments: the t-th mask catches what is still below
    # x_linspace[t] (earlier matches were shifted away above).
    for t in range(1, num - 1):
        temp_1 = x_linspace[t]
        temp_2 = values[t - 1]
        output_1 = torch.where(torch.lt(input_d, temp_1), torch.ones_like(input_d),
                               torch.zeros_like(input_d))
        alpha = 10 * output_1
        input_d = torch.add(input_d, alpha)
        output = torch.add(temp_2 * output_1, output)
    return output
def selu_apx(input_d, file_name):
    """Piecewise approximation of SELU over a tensor.

    Loads breakpoints (``x_linspace``) and per-segment values (``values``)
    from a pickle file. Inputs at/above the last breakpoint pass through
    unchanged (SELU is ~linear there); inputs below the first breakpoint map
    to the saturation constant -1.6733; interior inputs map to their
    segment's constant value.

    Args:
        input_d (torch.Tensor): input values to approximate.
        file_name (str): basename of the pickle under ``./process_data``.
            NOTE(review): path embeds a Windows-style backslash — confirm
            intended platform before changing.

    Returns:
        torch.Tensor: same shape as ``input_d``.
    """
    # BUG FIX: removed a dead nested `selu` reference implementation that
    # called the undefined name `tf` (it was never invoked, and would have
    # raised NameError if it had been), and closed the pickle file via a
    # context manager instead of leaking the handle.
    with open('./process_data\\' + file_name + '.pkl', 'rb') as file:
        x_linspace = pickle.load(file)   # breakpoints, ascending
        values = pickle.load(file)       # value for each interior segment
    num = len(values)
    output = torch.zeros_like(input_d)
    # Right tail: pass the input through unchanged; shift handled elements
    # out of range (+10) so later masks skip them.
    temp_1 = x_linspace[-1]
    output_1 = torch.where(torch.lt(input_d, temp_1), torch.zeros_like(input_d), input_d)
    output_temp = torch.where(torch.lt(input_d, temp_1), torch.zeros_like(input_d),
                              torch.ones_like(input_d))
    alpha = 10 * output_temp
    input_d = torch.add(input_d, alpha)
    output = torch.add(output, output_1)
    # Left tail: saturate at -1.6733 (approximately -alpha of SELU).
    temp_1 = x_linspace[0]
    output_1 = torch.where(torch.lt(input_d, temp_1), torch.ones_like(input_d), torch.zeros_like(input_d))
    alpha = 10 * output_1
    input_d = torch.add(input_d, alpha)
    output = torch.add(output, -1.6733 * output_1)
    # Interior segments, same masking scheme as tanh_apx.
    for t in range(1, num - 1):
        temp_1 = x_linspace[t]
        temp_2 = values[t - 1]
        output_1 = torch.where(torch.lt(input_d, temp_1), torch.ones_like(input_d),
                               torch.zeros_like(input_d))
        alpha = 10 * output_1
        input_d = torch.add(input_d, alpha)
        output = torch.add(temp_2 * output_1, output)
    return output
| 37.222222
| 103
| 0.698934
| 402
| 2,345
| 3.818408
| 0.121891
| 0.144625
| 0.09772
| 0.111401
| 0.867101
| 0.844951
| 0.844951
| 0.844951
| 0.844951
| 0.844951
| 0
| 0.065273
| 0.163753
| 2,345
| 62
| 104
| 37.822581
| 0.717491
| 0
| 0
| 0.728814
| 0
| 0
| 0.020469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.050847
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a055946ef49ebfa9352f08b7d3890ca5f838a46
| 2,653
|
py
|
Python
|
tests/config/test_config.py
|
craftslab/langcrawler
|
33490841411b290debc71960e0bd8e71bee653c1
|
[
"Apache-2.0"
] | null | null | null |
tests/config/test_config.py
|
craftslab/langcrawler
|
33490841411b290debc71960e0bd8e71bee653c1
|
[
"Apache-2.0"
] | null | null | null |
tests/config/test_config.py
|
craftslab/langcrawler
|
33490841411b290debc71960e0bd8e71bee653c1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from langcrawler.config.config import Config, ConfigException
def _rejects(assign):
    """Return True if calling *assign* raises ConfigException."""
    try:
        assign()
    except ConfigException:
        return True
    return False


def test_config():
    """Every Config setter rejects its invalid value and accepts a valid one.

    Replaces twenty hand-written try/except/else blocks with a table of
    (attribute, invalid value, valid value) cases, checked in the same
    per-attribute order (invalid first, then valid) as the original.
    """
    config = Config()
    cases = [
        ('pg_host', '', '127.0.0.1'),
        ('pg_port', 0, 5432),
        ('pg_user', '', 'postgres'),
        ('pg_pass', '', 'postgres'),
        ('redis_host', '', '127.0.0.1'),
        ('redis_port', 0, 6379),
        ('redis_pass', '', 'redis'),
        ('repo_count', 0, 1),
        ('repo_host', [], ['gerrit']),
        ('repo_lang', [], ['go']),
    ]
    for name, bad, good in cases:
        # Defaults bind loop variables eagerly (avoids late-binding closures).
        assert _rejects(lambda n=name, v=bad: setattr(config, n, v)), \
            "%s should reject %r" % (name, bad)
        assert not _rejects(lambda n=name, v=good: setattr(config, n, v)), \
            "%s should accept %r" % (name, good)
| 17.925676
| 61
| 0.555974
| 274
| 2,653
| 5.233577
| 0.124088
| 0.125523
| 0.320781
| 0.404463
| 0.883543
| 0.883543
| 0.883543
| 0.883543
| 0.85007
| 0.85007
| 0
| 0.015328
| 0.385224
| 2,653
| 147
| 62
| 18.047619
| 0.863887
| 0.007916
| 0
| 0.813008
| 0
| 0
| 0.017871
| 0
| 0
| 0
| 0
| 0
| 0.325203
| 1
| 0.00813
| false
| 0.03252
| 0.00813
| 0
| 0.01626
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8a0f21cf3be7bebe7ba46cbb20a7b8a97a859e51
| 2,422
|
py
|
Python
|
connapi/api/agify.py
|
YellowFoxH4XOR/connapi
|
f7663c048856f3c402cc3e2c1a22751bae703a81
|
[
"MIT"
] | null | null | null |
connapi/api/agify.py
|
YellowFoxH4XOR/connapi
|
f7663c048856f3c402cc3e2c1a22751bae703a81
|
[
"MIT"
] | null | null | null |
connapi/api/agify.py
|
YellowFoxH4XOR/connapi
|
f7663c048856f3c402cc3e2c1a22751bae703a81
|
[
"MIT"
] | null | null | null |
import requests
import json
from connapi.data import Linker
class Agify():
    """Thin client for the agify / genderize / nationalize name-inference APIs.

    All three public methods share identical URL-building and response
    handling, so that logic lives in a single helper; they differ only in
    the base endpoint URL taken from ``Linker``.

    NOTE(review): names are interpolated into the query string without URL
    encoding; names containing '&', '=' or spaces will produce malformed
    queries. Left as-is to preserve behavior — consider urllib.parse.quote.
    """

    def _request(base_url, datadict, country_code=None, api_key=None):
        """Build the query URL, issue the GET, and decode the JSON response.

        Returns the parsed JSON on HTTP 200, otherwise the string
        "Error Fetching data" (matching the original methods' contract).
        """
        size = len(datadict)
        if size == 1:
            url = base_url + "name=" + str(datadict[0])
        else:
            # Batch form: name[]=a&name[]=b&...
            url = base_url + "&".join(
                "name[]=" + str(datadict[i]) for i in range(0, size))
        if country_code is not None:
            url += "&country_id=" + country_code
        if api_key is not None:
            url += "&apikey=" + api_key
        resp = requests.get(url)
        if resp.status_code == 200:
            return json.loads(resp.text)
        return "Error Fetching data"

    def get_age(datadict, country_code=None, api_key=None):
        """Estimate age(s) for the given name(s) via the agify endpoint."""
        return Agify._request(Linker.url_agify, datadict, country_code, api_key)

    def get_gender(datadict, country_code=None, api_key=None):
        """Infer gender(s) for the given name(s) via the genderize endpoint."""
        return Agify._request(Linker.url_genderize, datadict, country_code, api_key)

    def get_nationality(datadict, api_key=None):
        """Infer nationality for the given name(s); this endpoint takes no country filter."""
        return Agify._request(Linker.url_nationalize, datadict, api_key=api_key)
| 34.112676
| 69
| 0.488439
| 285
| 2,422
| 4.007018
| 0.168421
| 0.073555
| 0.118214
| 0.073555
| 0.8669
| 0.8669
| 0.8669
| 0.8669
| 0.8669
| 0.8669
| 0
| 0.013953
| 0.378613
| 2,422
| 70
| 70
| 34.6
| 0.744851
| 0
| 0
| 0.8
| 0
| 0
| 0.068126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.046154
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a4e0657ecd54582c7cbd42a4a8aaaa43dbf531b
| 69,806
|
py
|
Python
|
gym_novel_gridworlds/novelty_wrappers.py
|
dirkmcpherson/gym-novel-gridworlds
|
5cd04fdf65e20cee51a9a6ed8eead662aad39259
|
[
"MIT"
] | 2
|
2021-08-06T08:41:34.000Z
|
2021-11-02T14:17:15.000Z
|
gym_novel_gridworlds/novelty_wrappers.py
|
dirkmcpherson/gym-novel-gridworlds
|
5cd04fdf65e20cee51a9a6ed8eead662aad39259
|
[
"MIT"
] | 3
|
2020-08-24T11:19:31.000Z
|
2021-08-23T10:12:04.000Z
|
gym_novel_gridworlds/novelty_wrappers.py
|
dirkmcpherson/gym-novel-gridworlds
|
5cd04fdf65e20cee51a9a6ed8eead662aad39259
|
[
"MIT"
] | 6
|
2020-07-28T12:52:44.000Z
|
2022-02-18T14:41:52.000Z
|
import copy
import numpy as np
import gym
from gym import error, spaces, utils
class AxeEasy(gym.core.Wrapper):
    """
    Novelty wrapper to add a new item (axe) in the inventory
    Using axe reduces the step_cost when Break action is used
    With optional arg breakincrease, the agent will get 2 items in inventory after break action instead of 1
    """

    def __init__(self, env, axe_material, breakincrease='false'):
        # axe_material: 'wooden' or 'iron' (forms the item name below).
        # breakincrease: string flag ('true'/'false'), not a bool — kept as a
        # string for compatibility with the novelty-arg plumbing.
        super().__init__(env)
        self.axe_name = axe_material + '_axe'  # wooden_axe, iron_axe
        self.env.items.add(self.axe_name)
        # Assign the next free item id only if the axe is not already registered
        self.env.items_id.setdefault(self.axe_name, len(self.items_id))
        # Agent starts with one axe already in the inventory ("easy" variant)
        self.env.inventory_items_quantity.update({self.axe_name: 1})
        self.env.entities.add(self.axe_name)
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        self.breakincrease = breakincrease

    def reset(self):
        """Reset the wrapped env, then re-grant one axe (reset clears inventory)."""
        obs = self.env.reset()
        self.env.inventory_items_quantity.update({self.axe_name: 1})
        return obs

    def step(self, action_id):
        """Intercept Break to apply axe-based step-cost discounts.

        Every other action id is delegated to the wrapped env unchanged.
        Returns the usual (obs, reward, done, info) tuple.
        """
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use breakincrease novelty_arg2 because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id

        if action_id == actions_id['Break']:
            # Recover the action's name from its id for last_action bookkeeping
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0  # base cost of Break without an axe selected
            message = ''

            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    # No usable axe selected: break at full cost, default reward
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str

            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()

            done = False
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True

            info = {'result': result, 'step_cost': step_cost, 'message': message}

            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)

        return obs, reward, done, info
class AxeMedium(gym.core.Wrapper):
    """
    Novelty wrapper to add a new item (axe) in the map
    When the agent goes near axe, axe gets into the inventory
    Using axe reduces the step_cost when Break action is used
    With optional arg breakincrease, the agent will get 2 items in inventory after break action instead of 1
    """

    def __init__(self, env, axe_material, breakincrease='false'):
        # Unlike AxeEasy, the axe is placed on the MAP (add_new_items), not
        # directly in the inventory; the agent must pick it up.
        super().__init__(env)
        self.axe_name = axe_material + '_axe'  # wooden_axe, iron_axe
        self.env.add_new_items({self.axe_name: 1})
        self.env.entities.add(self.axe_name)
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        self.breakincrease = breakincrease

    def step(self, action_id):
        """Intercept Break to apply axe-based step-cost discounts.

        Identical Break handling to AxeEasy; all other actions are delegated
        to the wrapped env. Returns (obs, reward, done, info).
        """
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use breakincrease novelty_arg2 because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id

        if action_id == actions_id['Break']:
            # Recover the action's name from its id for last_action bookkeeping
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0  # base cost of Break without an axe selected
            message = ''

            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    # No usable axe selected: break at full cost, default reward
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str

            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()

            done = False
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True

            info = {'result': result, 'step_cost': step_cost, 'message': message}

            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)

        return obs, reward, done, info
class AxeHard(gym.core.Wrapper):
    """
    Novelty wrapper to add a new recipe and action to craft axe
    The ingredients of axe are placed in the map
    When the agent crafts axe, it goes in the inventory
    Using axe reduces the step_cost to when Break action is used
    With optional arg breakincrease, the agent will get 2 items in inventory after break action instead of 1
    """

    def __init__(self, env, axe_material, breakincrease='false'):
        # "Hard" variant: inventory starts with ZERO axes; the recipe's
        # ingredients are scattered on the map and the agent must craft one.
        super().__init__(env)
        self.axe_material = axe_material
        self.axe_name = self.axe_material + '_axe'  # wooden_axe, iron_axe
        self.env.items.add(self.axe_name)
        self.env.items_id.setdefault(self.axe_name, len(self.items_id))
        self.env.inventory_items_quantity.update({self.axe_name: 0})
        self.env.entities.add(self.axe_name)

        # Action Space
        # NOTE(review): only 'wooden' and 'iron' are handled; any other
        # material leaves axe_recipe unbound and raises NameError below.
        if self.axe_material == 'wooden':
            axe_recipe = {'stick': 2, 'plank': 3}
        elif self.axe_material == 'iron':
            axe_recipe = {'stick': 2, 'iron': 3}

        # adding axe's ingredients to map
        for item in axe_recipe:
            if item in self.env.items:
                if item in self.items_quantity:
                    # Item already spawns on the map: raise its spawn count
                    item_quantity = self.items_quantity[item]
                    self.env.items_quantity.update({item: item_quantity + axe_recipe[item]})
                else:
                    self.env.items_quantity.update({item: axe_recipe[item]})
            else:
                self.env.add_new_items({item: axe_recipe[item]})

        # Register the recipe plus Craft_/Select_ actions, then resize the space
        self.env.recipes.update({self.axe_name: {'input': axe_recipe, 'output': {self.axe_name: 1}}})
        self.env.craft_actions_id.update({'Craft_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update({'Craft_' + self.axe_name: len(self.env.actions_id)})
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        self.env.action_space = spaces.Discrete(len(self.env.actions_id))
        self.breakincrease = breakincrease

    def step(self, action_id):
        """Intercept Craft_<axe> and Break; delegate everything else.

        Returns the usual (obs, reward, done, info) tuple.
        """
        if hasattr(self, 'limited_actions_id'):
            assert 'Craft_' + self.axe_name in self.limited_actions_id,\
                "Cannot use AxeHard novelty because you do not have " + "Craft_" + self.axe_name + " in LimitActions"
            assert 'Break' in self.limited_actions_id, "Cannot use breakincrease novelty_arg2 because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id

        if action_id == actions_id['Craft_' + self.axe_name]:
            # Recover the action's name from its id for last_action bookkeeping
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward, result, step_cost, message = self.craft(self.axe_name)

            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()

            done = False
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True

            info = {'result': result, 'step_cost': step_cost, 'message': message}

            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        elif action_id == actions_id['Break']:
            # Same axe-discounted Break handling as AxeEasy/AxeMedium
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0  # base cost of Break without an axe selected
            message = ''

            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    if self.breakincrease == 'true':
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 2
                    else:
                        self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    # No usable axe selected: break at full cost, default reward
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str

            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()

            done = False
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True

            info = {'result': result, 'step_cost': step_cost, 'message': message}

            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)

        return obs, reward, done, info

    def craft(self, item_to_craft):
        """Attempt to craft item_to_craft from inventory ingredients.

        Returns a (reward, result, step_cost, message) tuple; result is False
        when ingredients are missing or a crafting_table is required but not
        in front. Step costs are per-item constants matching the base env.
        """
        reward = -1  # default reward to craft in a wrong way
        result = True
        step_cost = 0  # default step_cost
        message = ''

        # Check if there are enough ingredients in the inventory
        have_all_ingredients = {}
        for item in self.env.recipes[item_to_craft]['input']:
            if item in self.env.inventory_items_quantity and self.env.inventory_items_quantity[item] >= \
                    self.env.recipes[item_to_craft]['input'][item]:
                have_all_ingredients[item] = True
            else:
                have_all_ingredients[item] = False

        # If there are not enough ingredients in the inventory
        if False in have_all_ingredients.values():
            result = False
            message = "Missing items: "
            if item_to_craft == 'tree_tap':
                step_cost = 360.0
            elif item_to_craft == 'pogo_stick':
                step_cost = 480.0
            for item in have_all_ingredients:
                if not have_all_ingredients[item]:
                    message += str(self.env.recipes[item_to_craft]['input'][item]) + ' ' + item + ', '
            # [:-2] strips the trailing ", " from the missing-items list
            return reward, result, step_cost, message[:-2]
        # Craft
        else:
            # If more than 1 ingredient needed, agent needs to be in front of crafting_table
            if len(self.env.recipes[item_to_craft]['input']) > 1:
                self.env.update_block_in_front()
                if not self.env.block_in_front_str == 'crafting_table':
                    if item_to_craft == 'tree_tap':
                        step_cost = 720.0
                    elif item_to_craft == 'pogo_stick':
                        step_cost = 840.0
                    elif item_to_craft == self.axe_name:
                        step_cost = 600.0
                    result = False
                    message = 'Need to be in front of crafting_table'
                    return reward, result, step_cost, message

            reward = self.reward_intermediate  # default reward to craft in a good way

            # Reduce ingredients from the inventory
            for item in self.env.recipes[item_to_craft]['input']:
                self.env.inventory_items_quantity[item] -= self.env.recipes[item_to_craft]['input'][item]
            # Add item_to_craft in the inventory
            self.env.inventory_items_quantity[item_to_craft] += self.env.recipes[item_to_craft]['output'][item_to_craft]

            if item_to_craft == 'plank':
                step_cost = 1200.0
            elif item_to_craft == 'stick':
                step_cost = 2400.0
            elif item_to_craft == 'tree_tap':
                step_cost = 7200.0
            elif item_to_craft == 'pogo_stick':
                step_cost = 8400.0
            elif item_to_craft == self.axe_name:
                step_cost = 6000.0

            message = 'Crafted ' + item_to_craft

            return reward, result, step_cost, message
class AxetoBreakEasy(gym.core.Wrapper):
    """
    Novelty wrapper to add a new item (axe) in the inventory and requiring axe to break items
    Using axe reduces the step_cost when Break action is used
    (0.5x for wooden_axe, 0.25x for iron_axe)
    """
    def __init__(self, env, axe_material):
        # axe_material: material prefix for the axe, e.g. 'wooden' or 'iron'
        super().__init__(env)
        self.axe_name = axe_material + '_axe'  # wooden_axe, iron_axe
        # Register the axe as a known item/entity and give the agent one copy
        self.env.items.add(self.axe_name)
        self.env.items_id.setdefault(self.axe_name, len(self.items_id))
        self.env.inventory_items_quantity.update({self.axe_name: 1})
        self.env.entities.add(self.axe_name)
        # Expose a Select_<axe> action so the agent can equip the axe
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
    def reset(self):
        obs = self.env.reset()
        # Re-grant the axe because the wrapped reset() rebuilds the inventory
        self.env.inventory_items_quantity.update({self.axe_name: 1})
        return obs
    def step(self, action_id):
        # Intercept Break: it succeeds only with an axe selected, at a reduced step_cost;
        # every other action is delegated to the wrapped env unchanged.
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use axetobreak novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Break']:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0
            message = ''
            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                # NOTE(review): selected_item is compared against hard-coded 'wooden_axe'/'iron_axe'
                # instead of self.axe_name; with the supported materials both spellings coincide — confirm
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    result = False
                    message = "Cannot break without " + self.axe_name + " selected"
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class AxetoBreakMedium(gym.core.Wrapper):
    """
    Novelty wrapper to add a new item (axe) in the map
    When the agent goes near axe, axe gets into the inventory
    Axe is required to break items
    Using axe reduces the step_cost when Break action is used
    (0.5x for wooden_axe, 0.25x for iron_axe)
    """
    def __init__(self, env, axe_material):
        # axe_material: material prefix for the axe, e.g. 'wooden' or 'iron'
        super().__init__(env)
        self.axe_name = axe_material + '_axe'  # wooden_axe, iron_axe
        # Place one axe on the map; the agent picks it up by walking near it
        self.env.add_new_items({self.axe_name: 1})
        self.env.entities.add(self.axe_name)
        # Expose a Select_<axe> action so the agent can equip the axe
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
    def step(self, action_id):
        # Intercept Break: it succeeds only with an axe selected, at a reduced step_cost;
        # every other action is delegated to the wrapped env unchanged.
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use axetobreak novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Break']:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0
            message = ''
            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                # NOTE(review): selected_item is compared against hard-coded 'wooden_axe'/'iron_axe'
                # instead of self.axe_name; with the supported materials both spellings coincide — confirm
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    result = False
                    message = "Cannot break without " + self.axe_name + " selected"
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class AxetoBreakHard(gym.core.Wrapper):
    """
    Novelty wrapper to add a new recipe and action to craft axe
    Agent starts with ingredients to craft an axe in the inventory
    When the agent crafts axe, it goes in the inventory
    Axe is required to break items
    Using axe reduces the step_cost when Break action is used
    (0.5x for wooden_axe, 0.25x for iron_axe)
    """
    def __init__(self, env, axe_material):
        # axe_material: 'wooden' or 'iron'. NOTE(review): any other value leaves
        # axe_recipe unbound below and raises NameError — confirm intended inputs.
        super().__init__(env)
        self.axe_material = axe_material
        self.axe_name = self.axe_material + '_axe'  # wooden_axe, iron_axe
        # Register the axe item; the agent starts with 0 and must craft it
        self.env.items.add(self.axe_name)
        self.env.items_id.setdefault(self.axe_name, len(self.items_id))
        self.env.inventory_items_quantity.update({self.axe_name: 0})
        self.env.entities.add(self.axe_name)
        # Action Space
        if self.axe_material == 'wooden':
            axe_recipe = {'stick': 2, 'plank': 3}
        elif self.axe_material == 'iron':
            axe_recipe = {'stick': 2, 'iron': 3}
        # Make sure every recipe ingredient is a registered item
        for item in axe_recipe:
            if item not in self.env.items:
                self.env.items.add(item)
                self.env.items_id.setdefault(item, len(self.items_id))
        # The agent starts with exactly the ingredients for one axe
        self.env.inventory_items_quantity.update(axe_recipe)
        self.env.recipes.update({self.axe_name: {'input': axe_recipe, 'output': {self.axe_name: 1}}})
        # self.action_craft_str.update({'Craft_' + self.axe_name: len(self.action_str)})
        # Add Craft_<axe> and Select_<axe> actions and grow the action space
        self.env.actions_id.update({'Craft_' + self.axe_name: len(self.env.actions_id)})
        self.env.select_actions_id.update({'Select_' + self.axe_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        self.env.action_space = spaces.Discrete(len(self.env.actions_id))
    def reset(self):
        obs = self.env.reset()
        # Restore the starting state: no axe, just the ingredients for one
        if self.axe_material == 'wooden':
            self.env.inventory_items_quantity.update({'wooden_axe': 0, 'stick': 2, 'plank': 3})
        elif self.axe_material == 'iron':
            self.env.inventory_items_quantity.update({'iron_axe': 0, 'stick': 2, 'iron': 3})
        return obs
    def step(self, action_id):
        # Intercept Craft_<axe> (delegates to self.craft) and Break (requires the axe);
        # every other action is delegated to the wrapped env unchanged.
        if hasattr(self, 'limited_actions_id'):
            assert 'Craft_' + self.axe_name in self.limited_actions_id,\
                "Cannot use AxetoBreakHard novelty because you do not have " + "Craft_" + self.axe_name + " in LimitActions"
            assert 'Break' in self.limited_actions_id, "Cannot use axetobreak novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Craft_' + self.axe_name]:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward, result, step_cost, message = self.craft(self.axe_name)
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        elif action_id == actions_id['Break']:
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0
            message = ''
            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                # NOTE(review): selected_item is compared against hard-coded 'wooden_axe'/'iron_axe'
                # instead of self.axe_name; with the supported materials both spellings coincide — confirm
                if self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'wooden_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.5  # 1800.0
                elif self.env.inventory_items_quantity[self.axe_name] >= 1 and self.env.selected_item == 'iron_axe':
                    block_r, block_c = self.env.block_in_front_location
                    self.env.map[block_r][block_c] = 0
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                    reward = self.reward_intermediate
                    step_cost = step_cost * 0.25  # 900.0
                else:
                    result = False
                    message = "Cannot break without " + self.axe_name + " selected"
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
    def craft(self, item_to_craft):
        # Attempt to craft item_to_craft from self.env.recipes.
        # Returns (reward, result, step_cost, message); on success the ingredients
        # are consumed and the crafted item is added to the inventory.
        reward = -1  # default reward to craft in a wrong way
        result = True
        step_cost = 0  # default step_cost
        message = ''
        # Check if there are enough ingredients in the inventory
        have_all_ingredients = {}
        for item in self.env.recipes[item_to_craft]['input']:
            if item in self.env.inventory_items_quantity and self.env.inventory_items_quantity[item] >= \
                    self.env.recipes[item_to_craft]['input'][item]:
                have_all_ingredients[item] = True
            else:
                have_all_ingredients[item] = False
        # If there are not enough ingredients in the inventory
        if False in have_all_ingredients.values():
            result = False
            message = "Missing items: "
            if item_to_craft == 'tree_tap':
                step_cost = 360.0
            elif item_to_craft == 'pogo_stick':
                step_cost = 480.0
            # List every missing ingredient with its required quantity
            for item in have_all_ingredients:
                if not have_all_ingredients[item]:
                    message += str(self.env.recipes[item_to_craft]['input'][item]) + ' ' + item + ', '
            # [:-2] drops the trailing ', '
            return reward, result, step_cost, message[:-2]
        # Craft
        else:
            # If more than 1 ingredient needed, agent needs to be in front of crafting_table
            if len(self.env.recipes[item_to_craft]['input']) > 1:
                self.env.update_block_in_front()
                if not self.env.block_in_front_str == 'crafting_table':
                    if item_to_craft == 'tree_tap':
                        step_cost = 720.0
                    elif item_to_craft == 'pogo_stick':
                        step_cost = 840.0
                    elif item_to_craft == self.axe_name:
                        step_cost = 600.0
                    result = False
                    message = 'Need to be in front of crafting_table'
                    return reward, result, step_cost, message
            reward = self.reward_intermediate  # default reward to craft in a good way
            # Reduce ingredients from the inventory
            for item in self.env.recipes[item_to_craft]['input']:
                self.env.inventory_items_quantity[item] -= self.env.recipes[item_to_craft]['input'][item]
            # Add item_to_craft in the inventory
            self.env.inventory_items_quantity[item_to_craft] += self.env.recipes[item_to_craft]['output'][item_to_craft]
            # Per-item step costs for a successful craft
            if item_to_craft == 'plank':
                step_cost = 1200.0
            elif item_to_craft == 'stick':
                step_cost = 2400.0
            elif item_to_craft == 'tree_tap':
                step_cost = 7200.0
            elif item_to_craft == 'pogo_stick':
                step_cost = 8400.0
            elif item_to_craft == self.axe_name:
                step_cost = 6000.0
            message = 'Crafted ' + item_to_craft
            return reward, result, step_cost, message
class Fence(gym.core.Wrapper):
    """
    Novelty wrapper that surrounds items in the map with fences on every reset.

    difficulty controls the fraction of non-empty, non-wall cells that get fenced:
    'easy' -> 20-50%, 'medium' -> 50-90%, anything else -> 90-100%.
    """
    def __init__(self, env, difficulty, fence_material):
        super().__init__(env)
        self.fence_name = fence_material + '_fence'  # oak_fence, jungle_fence
        # Register the fence item and a matching Select_<fence> action
        self.env.items.add(self.fence_name)
        self.env.items_id.setdefault(self.fence_name, len(self.items_id))
        self.env.select_actions_id.update({'Select_' + self.fence_name: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        ranges = {'easy': (20, 50), 'medium': (50, 90)}
        self.fence_percent_range = ranges.get(difficulty, (90, 100))
    def reset(self):
        self.env.reset()
        # Candidate cells: everything occupied except walls
        wall_id = self.env.items_id['wall']
        occupied = np.array(np.where((self.env.map != 0) & (self.env.map != wall_id)))
        # Shuffle candidates so fencing is random
        order = np.arange(len(occupied[0]))
        np.random.shuffle(order)
        occupied[0] = occupied[0][order]
        occupied[1] = occupied[1][order]
        low, high = self.fence_percent_range
        fence_percent = np.random.randint(low=low, high=high, size=1)[0]
        n_fenced = int(np.ceil(len(occupied[0]) * (fence_percent / 100)))
        for idx in range(n_fenced):
            self.env.add_fence_around((occupied[0][idx], occupied[1][idx]), self.fence_name)
        # Refresh observation/state after modifying the map
        obs = self.get_observation()
        self.update_block_in_front()
        return obs
class FenceRestriction(gym.core.Wrapper):
    """
    Novelty wrapper to restrict breaking an item around fence until fence(s) are broken.
    All fences are always breakable.
    difficulty:
        'easy'   -> no restriction (Break passes straight through)
        'medium' -> break blocked while a fence sits beside the agent, perpendicular to its facing
        'hard'   -> break blocked while any fence is in the 3x3 area around the target block
    """
    def __init__(self, env, difficulty, fence_material):
        super().__init__(env)
        self.difficulty = difficulty
        # Fences are always placed at the Fence wrapper's 'medium' density;
        # this wrapper's difficulty only selects the restriction rule above
        self.env2 = Fence(env, 'medium', fence_material=fence_material)
    def reset(self):
        obs = self.env2.reset()
        return obs
    def step(self, action_id):
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use fencerestriction novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Break'] and self.difficulty != 'easy':
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0
            message = ''
            self.env.update_block_in_front()
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                if self.env.block_in_front_str == self.env2.fence_name:
                    # Fence is always breakable
                    obs, reward, done, info = self.env.step(action_id)
                else:
                    fence_restriction = False
                    if self.difficulty == 'medium':
                        # In medium, one side of the item to break must not have fence and agent must be on that side
                        # NOTE(review): the check inspects the cells beside the AGENT, perpendicular
                        # to its facing direction — confirm this matches the stated intent
                        r, c = self.agent_location
                        if self.agent_facing_str == 'NORTH' or self.agent_facing_str == 'SOUTH':
                            if self.map[r][c - 1] == self.items_id[self.env2.fence_name] or self.map[r][c + 1] == self.items_id[self.env2.fence_name]:
                                fence_restriction = True
                        elif self.agent_facing_str == 'WEST' or self.agent_facing_str == 'EAST':
                            if self.map[r - 1][c] == self.items_id[self.env2.fence_name] or self.map[r + 1][c] == self.items_id[self.env2.fence_name]:
                                fence_restriction = True
                    else:
                        # In hard, all the sides of the item to break must not have fence
                        r, c = self.block_in_front_location
                        for r_item in [r - 1, r, r + 1]:
                            for c_item in [c - 1, c, c + 1]:
                                if self.map[r_item][c_item] == self.items_id[self.env2.fence_name]:
                                    fence_restriction = True
                                    # NOTE(review): breaks only the inner loop; harmless, the flag stays set
                                    break
                    if not fence_restriction:
                        obs, reward, done, info = self.env.step(action_id)
                    else:
                        result = False
                        message = "Cannot break due to fence restriction"
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str
            # Update after each step
            # (recomputed below even when self.env.step was delegated above)
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class AddItem(gym.core.Wrapper):
    """
    Novelty wrapper that scatters a brand-new item over empty map cells on every reset.

    difficulty controls the fraction of empty cells that receive the item:
    'easy' -> 1-10%, 'medium' -> 10-20%, anything else -> 20-30%.
    """
    def __init__(self, env, difficulty, item_to_add):
        super().__init__(env)
        self.item_to_add = item_to_add
        # Register the new item and a matching Select_<item> action
        self.env.items.add(self.item_to_add)
        self.env.items_id.setdefault(self.item_to_add, len(self.items_id))
        self.env.select_actions_id.update({'Select_' + self.item_to_add: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        ranges = {'easy': (1, 10), 'medium': (10, 20)}
        self.item_percent_range = ranges.get(difficulty, (20, 30))
    def reset(self):
        self.env.reset()
        free_cells = np.array(np.where(self.env.map == 0))
        # Shuffle the empty cells so placement is random
        order = np.arange(len(free_cells[0]))
        np.random.shuffle(order)
        free_cells[0] = free_cells[0][order]
        free_cells[1] = free_cells[1][order]
        low, high = self.item_percent_range
        item_percent = np.random.randint(low=low, high=high, size=1)[0]
        n_cells = int(np.ceil(len(free_cells[0]) * (item_percent / 100)))
        for idx in range(n_cells):
            cell = (free_cells[0][idx], free_cells[1][idx])
            # Never place an item on top of the agent
            if cell != self.env.agent_location:
                self.env.map[cell[0]][cell[1]] = self.items_id[self.item_to_add]
        # Refresh observation/state after modifying the map
        obs = self.get_observation()
        self.update_block_in_front()
        return obs
class Crate(gym.core.Wrapper):
    """
    Novelty wrapper to add crate(s) in the map. When the crate is broken, some ingredients of the goal_item_to_craft
    gets in the inventory
    difficulty controls how large a share of the goal recipe a crate yields:
    'easy' -> 99%, 'medium' -> 50-90%, anything else -> 10-50%.
    """
    def __init__(self, env, difficulty):
        super().__init__(env)
        # Crates are sprinkled on the map via the AddItem wrapper (always 'easy' density)
        self.env2 = AddItem(env, 'easy', item_to_add='crate')
        if difficulty == 'easy':
            item_percent_range = (99, 100)  # randint(99, 100) always yields 99
        elif difficulty == 'medium':
            item_percent_range = (50, 90)
        else:
            item_percent_range = (10, 50)
        item_percent = np.random.randint(low=item_percent_range[0], high=item_percent_range[1], size=1)[0]
        # Collect the goal recipe's ingredients and their total count
        total_ingredients = 0
        ingredients = []
        for item in self.recipes[self.goal_item_to_craft]['input']:
            total_ingredients += self.recipes[self.goal_item_to_craft]['input'][item]
            ingredients.append(item)
        crate_ingredients_num = int(np.ceil((item_percent / 100) * total_ingredients))
        # Randomly sample ingredients, never exceeding each item's recipe quantity.
        # Terminates because crate_ingredients_num <= total_ingredients (the sum of the caps).
        self.crate_ingredients = []
        while crate_ingredients_num:
            item = np.random.choice(ingredients, size=1)[0]
            if self.crate_ingredients.count(item) < self.recipes[self.goal_item_to_craft]['input'][item]:
                self.crate_ingredients.append(item)
                crate_ingredients_num -= 1
    def reset(self):
        obs = self.env2.reset()
        return obs
    def step(self, action_id):
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use crate novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        # Breaking a crate grants the sampled ingredients on top of the normal Break handling
        if action_id == actions_id['Break'] and self.env.block_in_front_str == 'crate':
            for item in self.crate_ingredients:
                self.env.inventory_items_quantity[item] += 1
            obs, reward, done, info = self.env.step(action_id)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class ReplaceItem(gym.core.Wrapper):
    """
    Novelty wrapper that swaps a fraction of an existing item's map cells for a new item.

    difficulty controls the replaced fraction:
    'easy' -> 5-20%, 'medium' -> 40-90%, anything else -> 99-100%.
    When walls are replaced, the replacement item is also marked unbreakable.
    """
    def __init__(self, env, difficulty, item_to_replace='wall', item_to_replace_with='brick'):
        super().__init__(env)
        self.item_to_replace = item_to_replace
        self.item_to_replace_with = item_to_replace_with
        assert self.item_to_replace in self.env.items_id, \
            "Item to replace (" + self.item_to_replace + ") is not in the original map"
        assert self.item_to_replace_with not in self.env.items_id, \
            "Item to replace with (" + self.item_to_replace_with + ") should be a new item"
        # Register the replacement item and a matching Select_<item> action
        self.env.items.add(self.item_to_replace_with)
        self.env.items_id.setdefault(self.item_to_replace_with, len(self.items_id))
        self.env.select_actions_id.update({'Select_' + self.item_to_replace_with: len(self.env.actions_id)})
        self.env.actions_id.update(self.env.select_actions_id)
        if self.item_to_replace == 'wall':
            # A replaced wall must stay unbreakable
            self.env.unbreakable_items.add(self.item_to_replace_with)
        ranges = {'easy': (5, 20), 'medium': (40, 90)}
        self.item_percent_range = ranges.get(difficulty, (99, 100))
    def reset(self):
        self.env.reset()
        targets = np.array(np.where(self.env.map == self.env.items_id[self.item_to_replace]))
        # Shuffle candidate cells so replacement is random
        order = np.arange(len(targets[0]))
        np.random.shuffle(order)
        targets[0] = targets[0][order]
        targets[1] = targets[1][order]
        low, high = self.item_percent_range
        item_percent = np.random.randint(low=low, high=high, size=1)[0]
        n_cells = int(np.ceil(len(targets[0]) * (item_percent / 100)))
        for idx in range(n_cells):
            cell = (targets[0][idx], targets[1][idx])
            # Never replace the cell the agent occupies
            if cell != self.env.agent_location:
                self.env.map[cell[0]][cell[1]] = self.items_id[self.item_to_replace_with]
        # Refresh observation/state after modifying the map
        obs = self.env.get_observation()
        self.update_block_in_front()
        return obs
class FireWall(gym.core.Wrapper):
    """
    Novelty wrapper to add fire_wall; the agent dies when it ends a step next to a fire_wall.
    Implemented by replacing walls with 'fire_wall' via the ReplaceItem wrapper.
    """
    def __init__(self, env, difficulty='hard'):
        super().__init__(env)
        self.env2 = ReplaceItem(env, difficulty, item_to_replace='wall', item_to_replace_with='fire_wall')
    def reset(self):
        obs = self.env2.reset()
        return obs
    def step(self, action_id):
        obs, reward, done, info = self.env.step(action_id)
        r, c = self.agent_location
        fire_id = self.env.items_id['fire_wall']
        # Scan the four orthogonal neighbours (NORTH, SOUTH, WEST, EAST) for fire_wall
        close_to_fire_wall = False
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr <= self.map_size - 1 and 0 <= nc <= self.map_size - 1 \
                    and self.map[nr][nc] == fire_id:
                close_to_fire_wall = True
                break
        if close_to_fire_wall:
            # Death penalty (note: '-x // 2' floors, matching the original expression)
            reward = -self.reward_done // 2
            done = True
            info['message'] = 'You died due to fire_wall'
        # Update after each step
        self.env.last_reward = reward
        self.env.last_done = done
        lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                 'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                 'last_done': self.env.last_done}
        self.set_lasts(lasts)
        return obs, reward, done, info
def remap_action_difficulty(env, difficulty='hard'):
    """
    Remap actions randomly.

    If the LimitActions wrapper is in use, only limited_actions are remapped,
    regardless of difficulty. Otherwise:
    'easy'   -> remap the manipulation actions only
    'medium' -> remap manipulation and craft actions
    other    -> remap every action and rebuild the craft/select groupings
    """
    if hasattr(env, 'limited_actions_id'):
        env.set_limited_actions_id(env.remap_action(env.limited_actions_id, 0))
        return env
    if difficulty == 'easy':
        env.manipulation_actions_id = env.remap_action(env.manipulation_actions_id, 0)
        env.actions_id.update(env.manipulation_actions_id)
    elif difficulty == 'medium':
        env.manipulation_actions_id = env.remap_action(env.manipulation_actions_id, 0)
        env.craft_actions_id = env.remap_action(env.craft_actions_id, len(env.manipulation_actions_id))
        env.actions_id.update(env.manipulation_actions_id)
        env.actions_id.update(env.craft_actions_id)
    else:
        env.actions_id = env.remap_action(env.actions_id, 0)
        # Rebuild the per-category views from the freshly remapped ids
        env.craft_actions_id = {name: aid for name, aid in env.actions_id.items()
                                if name.startswith('Craft')}
        env.select_actions_id = {name: aid for name, aid in env.actions_id.items()
                                 if name.startswith('Select')}
    return env
# Novelty without difficulty types:
class BlockItem(gym.core.Wrapper):
    """
    Novelty wrapper to block crafting_table from tree_log when rubber is extracted.

    On a successful Extract_rubber (rubber count increased), every crafting_table
    on the map is surrounded with fences.
    """
    def __init__(self, env):
        super().__init__(env)
        self.items_to_block = 'crafting_table'
        self.item_to_block_from = 'tree_log'
        # Register the fence item used to surround the blocked tables
        self.env.items.add('fence')
        self.env.items_id.setdefault('fence', len(self.items_id))
    def step(self, action_id):
        # Snapshot the rubber count before the step; a plain read is enough
        # (int is immutable, the previous copy.deepcopy was pointless overhead)
        old_rubber_quantity = self.env.inventory_items_quantity['rubber']
        obs, reward, done, info = self.env.step(action_id)
        # Extract_rubber succeeded only if the rubber count actually went up
        if action_id == self.actions_id['Extract_rubber']:
            if old_rubber_quantity < self.env.inventory_items_quantity['rubber']:
                # Surround every crafting_table with fences
                result = np.where(self.env.map == self.env.items_id[self.items_to_block])
                for i in range(len(result[0])):
                    r, c = result[0][i], result[1][i]
                    self.env.add_fence_around((r, c))
        return obs, reward, done, info
class AddChopAction(gym.core.Wrapper):
    """
    Novelty wrapper to add chop action
    It's like break action, but instead of 1 item, agent will get 2 items, but step_cost will be higher (1.2 times)
    """
    def __init__(self, env):
        super().__init__(env)
        # Register Chop as a new manipulation action and grow the action space
        self.env.manipulation_actions_id['Chop'] = len(self.actions_id)
        self.env.actions_id.update(self.manipulation_actions_id)
        self.action_space = spaces.Discrete(len(self.actions_id))
    def step(self, action_id):
        # Handle Chop here; delegate every other action to the wrapped env.
        if hasattr(self, 'limited_actions_id'):
            assert 'Chop' in self.limited_actions_id, "Cannot use addchop novelty because you do not have Chop in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Chop']:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0 * 1.2  # 1.2x the normal Break cost
            message = ''
            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.block_in_front_str not in self.unbreakable_items:
                block_r, block_c = self.block_in_front_location
                self.map[block_r][block_c] = 0
                # Chop yields double the items of Break
                self.inventory_items_quantity[self.block_in_front_str] += 1 * 2
                reward = self.reward_intermediate
            else:
                result = False
                message = "Cannot chop " + self.block_in_front_str
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class AddJumpAction(gym.core.Wrapper):
    """
    Novelty wrapper to add jump action, when it's executed, the agent jumps 2 blocks forward
    The landing cell must be inside the map and empty; otherwise the jump fails.
    """
    def __init__(self, env):
        super().__init__(env)
        # Register Jump as a new manipulation action and grow the action space
        self.env.manipulation_actions_id['Jump'] = len(self.actions_id)
        self.env.actions_id.update(self.manipulation_actions_id)
        self.action_space = spaces.Discrete(len(self.actions_id))
    def step(self, action_id):
        # Handle Jump here; delegate every other action to the wrapped env.
        if hasattr(self, 'limited_actions_id'):
            assert 'Jump' in self.limited_actions_id, "Cannot use addjump novelty because you do not have Jump in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Jump']:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            r, c = self.agent_location
            reward = -1  # default reward
            result = True
            step_cost = 0  # default step_cost
            message = ''
            # Jump 2 cells in the facing direction when the landing cell is in-bounds and empty (0)
            if self.agent_facing_str == 'NORTH' and (0 <= (r - 2) <= self.map_size - 1) and self.map[r - 2][c] == 0:
                self.set_agent_location(r - 2, c)
            elif self.agent_facing_str == 'SOUTH' and (0 <= (r + 2) <= self.map_size - 1) and self.map[r + 2][c] == 0:
                self.set_agent_location(r + 2, c)
            elif self.agent_facing_str == 'WEST' and (0 <= (c - 2) <= self.map_size - 1) and self.map[r][c - 2] == 0:
                self.set_agent_location(r, c - 2)
            elif self.agent_facing_str == 'EAST' and (0 <= (c + 2) <= self.map_size - 1) and self.map[r][c + 2] == 0:
                self.set_agent_location(r, c + 2)
            else:
                result = False
                message = 'Block in path'
            # Twice the cost of a single forward move; charged whether or not the jump succeeded
            step_cost = 27.906975 * 2
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class BreakIncrease(gym.core.Wrapper):
    """
    Novelty wrapper to get 2 items in inventory when the agent break that item instead of 1
    itemtobreakmore: apply this to only itemtobreakmore or if itemtobreakmore == '', then to all items
    """
    def __init__(self, env, itemtobreakmore=''):
        super().__init__(env)
        # '' means the double-yield bonus applies to every breakable item
        self.itemtobreakmore = itemtobreakmore
    def step(self, action_id):
        # Handle Break here (with the increased yield); delegate every other action.
        if hasattr(self, 'limited_actions_id'):
            assert 'Break' in self.limited_actions_id, "Cannot use breakincrease novelty because you do not have Break in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        if action_id == actions_id['Break']:
            # Reverse lookup: recover the action name from its id
            self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
            reward = -1  # default reward
            result = True
            step_cost = 3600.0
            message = ''
            self.env.update_block_in_front()
            # If block in front is not air and wall, place the block in front in inventory
            if self.env.block_in_front_str not in self.env.unbreakable_items:
                block_r, block_c = self.env.block_in_front_location
                self.env.map[block_r][block_c] = 0
                if self.itemtobreakmore == self.env.block_in_front_str:
                    # The targeted item yields double
                    self.env.inventory_items_quantity[self.itemtobreakmore] += 2
                elif self.itemtobreakmore == '':
                    # No specific target: every item yields double
                    # (self.block_in_front_str delegates to the wrapped env's attribute)
                    self.env.inventory_items_quantity[self.block_in_front_str] += 2
                else:
                    # Any other item behaves like a normal Break
                    self.env.inventory_items_quantity[self.env.block_in_front_str] += 1
                reward = self.reward_intermediate
            else:
                result = False
                message = "Cannot break " + self.env.block_in_front_str
            # Update after each step
            self.env.grab_entities()
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            # Episode terminates once the goal item is in the inventory
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
class ExtractIncDec(gym.core.Wrapper):
    """
    Novelty wrapper to increase/decrease string when Extract_string action is executed
    incdec: 'increase' to increase, 'decrease' to decrease
    """
    def __init__(self, env, incdec='decrease'):
        super().__init__(env)
        # 'increase' doubles the extracted amount; any other value halves it.
        self.incdec = incdec
    def step(self, action_id):
        # Respect a LimitActions wrapper if one is applied below us; the
        # extract action name varies per env, so match on the prefix.
        if hasattr(self, 'limited_actions_id'):
            has_extract = False
            for action in self.limited_actions_id:
                if action.startswith('Extract'):
                    has_extract = True
                    break
            assert has_extract, "Cannot use extractincdec novelty because you do not have Extract action in LimitActions"
            actions_id = self.limited_actions_id
        else:
            actions_id = self.actions_id
        # Reverse-lookup the action name from its id for bookkeeping.
        self.env.last_action = list(actions_id.keys())[list(actions_id.values()).index(action_id)]
        if self.env.last_action.startswith('Extract'):
            reward = -1  # default reward
            result = True
            step_cost = 120.0  # default step_cost
            message = ''
            if self.env_id.startswith('NovelGridworld-Bow'):
                # Bow env: Extract_string on wool; base yield is 4 string.
                if self.block_in_front_str == 'wool':
                    if self.incdec == 'increase':
                        self.inventory_items_quantity['string'] += 4 * 2  # Extract_string
                    else:
                        self.inventory_items_quantity['string'] += 4 // 2  # Extract_string
                    # The wool block is consumed (map cell set to air/0).
                    block_r, block_c = self.block_in_front_location
                    self.map[block_r][block_c] = 0
                    reward = self.reward_intermediate
                    step_cost = 5000
                else:
                    result = False
                    message = "No wool found"
            elif self.env_id.startswith('NovelGridworld-Pogostick'):
                # Make sure that block_in_front_location is next to a tree
                block_in_front_next_to_tree = self.is_block_in_front_next_to('tree_log')
                if self.block_in_front_str == 'tree_tap':
                    # NOTE(review): only the 'increase' case changes inventory
                    # here; inject_novelty forbids this wrapper on Pogostick,
                    # so the 'decrease' path is intentionally a no-op.
                    if block_in_front_next_to_tree:
                        if self.incdec == 'increase':
                            self.inventory_items_quantity['rubber'] += 1 * 2  # Extract_rubber
                        reward = self.reward_intermediate
                        step_cost = 50000
                    else:
                        result = False
                        message = "No tree_log near tree_tap"
                else:
                    result = False
                    message = "No tree_tap found"
            # Update after each step
            self.env.grab_entities()
            # Prefer a wrapper-provided observation function if one exists.
            if hasattr(self, 'observation'):
                obs = self.observation()
            else:
                obs = self.env.get_observation()
            self.env.update_block_in_front()
            done = False
            if self.env.inventory_items_quantity[self.goal_item_to_craft] >= 1:
                reward = self.reward_done
                done = True
            info = {'result': result, 'step_cost': step_cost, 'message': message}
            # Update after each step
            self.env.step_count += 1
            self.env.last_step_cost = step_cost
            self.env.last_reward = reward
            self.env.last_done = done
            lasts = {'last_action': self.env.last_action, 'step_count': self.env.step_count,
                     'last_step_cost': self.env.last_step_cost, 'last_reward': self.env.last_reward,
                     'last_done': self.env.last_done}
            self.set_lasts(lasts)
        else:
            # Not an Extract action: delegate unchanged to the wrapped env.
            obs, reward, done, info = self.env.step(action_id)
        return obs, reward, done, info
#################### Novelty Helper ####################
def inject_novelty(env, novelty_name, difficulty='hard', novelty_arg1='', novelty_arg2=''):
    """Wrap ``env`` in the novelty wrapper selected by ``novelty_name``.

    Parameters:
        env: environment to wrap.
        novelty_name (str): one of the supported novelty names (see
            ``novelty_names`` below).
        difficulty (str): 'easy', 'medium' or 'hard'; only consulted for the
            novelties that support difficulty levels.
        novelty_arg1, novelty_arg2 (str): novelty-specific arguments
            (item names, attributes, flags); validated per novelty.

    Returns:
        The wrapped environment.

    Raises:
        AssertionError: on an unknown novelty name, bad difficulty, or
            missing/invalid novelty arguments.
    """
    novelty_names = ['addchop', 'additem', 'addjump', 'axe', 'axetobreak', 'breakincrease', 'crate', 'extractincdec', 'fence',
                     'fencerestriction', 'firewall', 'remapaction', 'replaceitem']
    assert novelty_name in novelty_names, "novelty_name must be one of " + str(novelty_names)
    # Only these novelties distinguish difficulty levels.
    if novelty_name in ['additem', 'axe', 'axetobreak', 'crate', 'fence', 'fencerestriction', 'firewall', 'remapaction', 'replaceitem']:
        assert difficulty in ['easy', 'medium', 'hard'], "difficulty must be one of 'easy', 'medium', 'hard'"
    if novelty_name == 'addchop':
        env = AddChopAction(env)
    elif novelty_name == 'additem':
        assert novelty_arg1, "For additem novelty, novelty_arg1 (name of the item to add) is needed"
        env = AddItem(env, difficulty, novelty_arg1)
    elif novelty_name == 'addjump':
        env = AddJumpAction(env)
    elif novelty_name == 'axe':
        assert novelty_arg1 in ['wooden', 'iron'], \
            "For axe novelty, novelty_arg1 (attribute of axe, e.g. wooden, iron) is needed"
        # novelty_arg2 optionally toggles breakincrease behaviour.
        if novelty_arg2:
            assert novelty_arg2 in ['true', 'false'], "For axe novelty, novelty_arg2 (breakincrease) must be 'true' or 'false'"
            if difficulty == 'easy':
                env = AxeEasy(env, novelty_arg1, novelty_arg2)
            elif difficulty == 'medium':
                env = AxeMedium(env, novelty_arg1, novelty_arg2)
            elif difficulty == 'hard':
                env = AxeHard(env, novelty_arg1, novelty_arg2)
        else:
            if difficulty == 'easy':
                env = AxeEasy(env, novelty_arg1)
            elif difficulty == 'medium':
                env = AxeMedium(env, novelty_arg1)
            elif difficulty == 'hard':
                env = AxeHard(env, novelty_arg1)
    elif novelty_name == 'axetobreak':
        # BUGFIX: message previously said "For axe novelty" (copy-paste).
        assert novelty_arg1 in ['wooden', 'iron'], \
            "For axetobreak novelty, novelty_arg1 (attribute of axe, e.g. wooden, iron) is needed"
        if difficulty == 'easy':
            env = AxetoBreakEasy(env, novelty_arg1)
        elif difficulty == 'medium':
            env = AxetoBreakMedium(env, novelty_arg1)
        elif difficulty == 'hard':
            env = AxetoBreakHard(env, novelty_arg1)
    elif novelty_name == 'breakincrease':
        if novelty_arg1:
            # BUGFIX: the failure message previously read env.itemtobreakmore,
            # an attribute that only exists after BreakIncrease is applied, so
            # a failing assert raised AttributeError instead of the message.
            assert novelty_arg1 in env.items, novelty_arg1 + " is not in " + env.env_id
            env = BreakIncrease(env, novelty_arg1)
        else:
            env = BreakIncrease(env)
    elif novelty_name == 'crate':
        env = Crate(env, difficulty)
    elif novelty_name == 'extractincdec':
        assert novelty_arg1 in ['increase', 'decrease'], \
            "For extractincdec novelty, novelty_arg1 ('increase', 'decrease') is needed"
        assert env.env_id != 'NovelGridworld-Bow-v0', "There is nothing to extract in NovelGridworld-Bow-v0"
        if env.env_id == 'NovelGridworld-Bow-v1':
            assert novelty_arg1 == 'decrease', "In NovelGridworld-Bow-v1, increasing string extraction will not benefit " \
                                               "as only 3 string are needed"
        assert not env.env_id.startswith('NovelGridworld-Pogostick'), "In NovelGridworld-Pogostick, you should not use " \
            "extractincdec novelty because rubber extraction cannot be decreased, and increasing rubber extraction will" \
            " not benefit as only 1 rubber is needed"
        env = ExtractIncDec(env, novelty_arg1)
    elif novelty_name == 'fence':
        assert novelty_arg1, "For fence novelty, novelty_arg1 (attribute of fence, e.g. oak, jungle) is needed"
        env = Fence(env, difficulty, novelty_arg1)
    elif novelty_name == 'fencerestriction':
        assert novelty_arg1, "For fencerestriction novelty, novelty_arg1 (attribute of fence, e.g. oak, jungle) is needed"
        env = FenceRestriction(env, difficulty, novelty_arg1)
    elif novelty_name == 'firewall':
        env = FireWall(env, difficulty)
    elif novelty_name == 'remapaction':
        env = remap_action_difficulty(env, difficulty)
    elif novelty_name == 'replaceitem':
        # BUGFIX: added the missing space between the two message halves
        # ("novelty_arg2(Item..." -> "novelty_arg2 (Item...").
        assert novelty_arg1 and novelty_arg2, "For replaceitem novelty, novelty_arg1 (Item to replace) and novelty_arg2 " \
                                              "(Item to replace with) are needed"
        env = ReplaceItem(env, difficulty, novelty_arg1, novelty_arg2)
    return env
| 41.675224
| 150
| 0.588173
| 8,864
| 69,806
| 4.385153
| 0.037568
| 0.092925
| 0.035503
| 0.037278
| 0.873296
| 0.84371
| 0.802856
| 0.774711
| 0.755364
| 0.739413
| 0
| 0.012878
| 0.312552
| 69,806
| 1,674
| 151
| 41.700119
| 0.797116
| 0.085279
| 0
| 0.786019
| 0
| 0
| 0.091305
| 0.002495
| 0
| 0
| 0
| 0
| 0.025575
| 1
| 0.037511
| false
| 0
| 0.00341
| 0
| 0.081841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8aba7cf6bbae988523fe116fa67e1795059a1673
| 13,460
|
py
|
Python
|
A2/Versions/assign2v4.py
|
ShamsuddohaShameem17/coms3200
|
211df3ea954c33f64cc833772e07e90356baa853
|
[
"MIT"
] | null | null | null |
A2/Versions/assign2v4.py
|
ShamsuddohaShameem17/coms3200
|
211df3ea954c33f64cc833772e07e90356baa853
|
[
"MIT"
] | null | null | null |
A2/Versions/assign2v4.py
|
ShamsuddohaShameem17/coms3200
|
211df3ea954c33f64cc833772e07e90356baa853
|
[
"MIT"
] | 1
|
2021-05-13T21:09:21.000Z
|
2021-05-13T21:09:21.000Z
|
# REF: https://pymotw.com/2/struct/
#Creating a RUSH packet sender
import sys
import socket
from collections import namedtuple
from struct import pack, unpack
LOCALHOST = "127.0.0.1"
LOCAL_ADDR_INFO =(LOCALHOST,0)  # port 0: let the OS pick a free port
PACKET_SIZE = 1472   # header (6 bytes) + payload (1466 bytes)
PAYLOAD_SIZE = 1466
PAYLOAD_SIZE_BITS = PAYLOAD_SIZE * 8
SEND_SIZE = 1500 #Max size of RUSH packet
RECV_SIZE = 1500 #Max size of RUSH packet
# Creating RUSH packets
# NOTE: 9 fields but only 8 defaults, so the defaults cover the last 8
# fields and seq_num is the only required argument when building a Packet.
rushFields = ("seq_num","ack_num","ack_flag","nak_flag","get_flag","dat_flag","fin_flag","reserved","data")
defaultFieldValues = [0,0,0,0,0,0,0,0]
Packet = namedtuple('RUSH', rushFields, defaults = defaultFieldValues)
# Find free ports by PC
def free_port():
    """Ask the OS for a currently unused port number and return it."""
    probe = socket.socket()
    try:
        # Binding to port 0 makes the host assign a free port.
        probe.bind(LOCAL_ADDR_INFO)
        return probe.getsockname()[1]
    finally:
        probe.close()
# Convert string to integer (byte representation)
def str_to_int(string, pad=PAYLOAD_SIZE):
    """Encode *string* as UTF-8, zero-pad it to *pad* bytes, and return the
    big-endian integer value of the padded bytes.

    Pass ``pad=None`` to skip padding.

    BUGFIX: padding is now computed from the encoded byte length rather than
    the character count, so strings containing multi-byte UTF-8 characters
    are padded to the correct total size.
    """
    b_str = string.encode("UTF-8")
    if pad is not None:
        b_str = b_str.ljust(pad, b'\0')
    return int.from_bytes(b_str, byteorder='big')
# Convert byte (represented as integer) to string (data)
def int_to_str(integer, size=PAYLOAD_SIZE):
    """Inverse of str_to_int: render *integer* as *size* big-endian bytes,
    strip the trailing NUL padding, and decode the result as UTF-8."""
    raw_bytes = integer.to_bytes(size, byteorder='big')
    stripped = raw_bytes.rstrip(b'\x00')
    return stripped.decode("UTF-8")
# Convert the packet into bytes array
def raw(packet):
    """Serialise a RUSH Packet namedtuple into its on-the-wire byte form.

    Layout: seq_num and ack_num as big-endian signed shorts (4 bytes),
    one flag byte (5 flag bits + 3 zero bits), one reserved zero byte,
    then the payload zero-padded to PAYLOAD_SIZE bytes.

    (Removed the large commented-out "Take 1/2" experiments; the active
    "Take 3" path is preserved unchanged.)
    """
    # Packet identifier: two big-endian signed shorts.
    packet_id_bytes = pack('!hh', packet.seq_num, packet.ack_num)
    # Flags: ack, nak, get, dat, fin packed into the top 5 bits of one byte.
    flag_list = [packet.ack_flag, packet.nak_flag, packet.get_flag, packet.dat_flag, packet.fin_flag]
    flag_str = "".join(map(str, flag_list)) + "000"
    flag_byte = pack("!B", int(flag_str, base=2))
    # Reserved byte is always zero.
    reserved = b'\x00'
    # Payload: string data zero-padded to PAYLOAD_SIZE bytes.
    data_byte = str_to_int(packet.data).to_bytes(PAYLOAD_SIZE, byteorder='big')
    return b"".join([packet_id_bytes, flag_byte, reserved, data_byte])
# Convert raw packet information into packet (repr)
def raw_packet_decode(rawByte):
    """Parse raw RUSH bytes into a Packet namedtuple.

    The first 6 bytes are the header (seq_num, ack_num, flag byte, reserved
    pad byte); the remainder is the NUL-padded UTF-8 payload.
    """
    # First 6 Bytes are RUSH HEADER without payload
    unpacked_data = unpack('!hhBx', rawByte[0:6])
    seq_num, ack_num, flag_byte = unpacked_data
    # BUGFIX: bin() drops leading zeros, so e.g. a lone nak flag (0x40,
    # 0b01000000) previously sliced to '10000' and decoded as ack_flag.
    # Zero-pad to 8 bits before taking the first 5 flag bits.
    flagStr = format(flag_byte, '08b')[:5]  # first 5 bits
    print("Binary string of byte:", flagStr)
    flags = [int(c) for c in flagStr]
    data_info = rawByte[6:].rstrip(b'\x00').decode("UTF-8")
    packet_val_list = [seq_num, ack_num]
    packet_val_list.extend(flags)
    return Packet(*packet_val_list, data=data_info)
def print_packet(packet):
    """Print the repr of *packet* (debug helper)."""
    print("{!r}".format(packet))
#########################################################################################################
class Connection:
    # NOTE(review): this class is work in progress and references names that
    # are not defined in this file as written: RUSH (the namedtuple here is
    # named Packet), SEND_MODE / RECV_MODE, and self._serv_info (never
    # assigned anywhere). Documented as-is; confirm against a later revision.
    def __init__(self, my_ip, my_port):
        # (ip, port) pair this endpoint binds its UDP socket to.
        self._my_info = (my_ip, my_port)
        self._socket = None
        # RUSH sequence numbers start at 1 and increment per packet sent.
        self._seq_num = 1
    def _print(self, pkt, port, mode):
        # Pretty-print a packet's header fields and payload for debugging;
        # `mode` is presumably a send/receive label — TODO confirm.
        output = "{} port {}:\n (seq_num={}, ack_num={}, flags={}{}{}{}{})".format(mode, port, pkt.seq_num,
            pkt.ack_num, pkt.ack_flag, pkt.nak_flag, pkt.get_flag, pkt.dat_flag, pkt.fin_flag)
        output += "\n Data: {}".format(repr(int_to_str(pkt.data)))
        print(output + "\n")
    def _find_freeport(self):
        # Thin wrapper over the module-level helper.
        return free_port()
    def connect(self):
        """Open and bind the UDP socket; return True on success."""
        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socket.bind(self._my_info)
            return True
        except socket.error as err:
            print("Error encountered when opening socket:\n", err)
            return False
    def close(self):
        # Close the underlying socket (no-op safety not handled here).
        self._socket.close()
    def send_request(self, resource):
        # Send a GET packet asking the server for `resource`.
        # NOTE(review): RUSH is undefined here — presumably Packet was meant.
        pkt = RUSH(seq_num=self._seq_num, get_flag=1, data=str_to_int(resource))
        self._socket.sendto(raw(pkt), self._serv_info)
        self._seq_num += 1
        self._print(pkt, self._serv_info[1], SEND_MODE)
    def recv_pkt(self):
        """Receive one datagram and decode it; returns (packet, sender_info)."""
        raw_data, info = self._socket.recvfrom(RECV_SIZE)
        assert len(raw_data) <= PACKET_SIZE, "Received overlong packet: " + repr(raw_data)
        try:
            #OLD:
            # NOTE(review): RUSH(raw_data) cannot decode raw bytes — this
            # looks like it should call raw_packet_decode(raw_data); also the
            # bare except below hides the real error. Verify before shipping.
            return RUSH(raw_data), info
            ##########
        except:
            assert False, "Could not decode packet: " + repr(raw_data)
    def run(self):
        """Main receive loop: ACK data packets and perform the FIN handshake."""
        while True:
            pkt, info = self.recv_pkt()
            self._print(pkt, info[1], RECV_MODE)
            # Server-initiated FIN (fin set, all other flags clear):
            # reply FIN/ACK, then wait for the server's FIN/ACK to finish.
            if pkt.fin_flag == 1 and all(i == 0 for i in (pkt.ack_flag, pkt.nak_flag, pkt.dat_flag, pkt.get_flag)):
                cli_fin_ack = RUSH(seq_num=self._seq_num, ack_num=pkt.seq_num, fin_flag=1, ack_flag=1)
                self._socket.sendto(raw(cli_fin_ack), self._serv_info)
                self._seq_num += 1
                self._print(cli_fin_ack, self._serv_info[1], SEND_MODE)
                while True:
                    serv_fin_ack, info = self.recv_pkt()
                    self._print(serv_fin_ack, info[1], RECV_MODE)
                    if serv_fin_ack.fin_flag == 1 and serv_fin_ack.ack_flag == 1 and \
                            all(i == 0 for i in (serv_fin_ack.nak_flag, serv_fin_ack.dat_flag, serv_fin_ack.get_flag)):
                        return # end of connection
            # Data packet: acknowledge it with DAT/ACK.
            elif pkt.dat_flag == 1:
                ack = RUSH(seq_num=self._seq_num, ack_num=pkt.seq_num, dat_flag=1, ack_flag=1)
                self._socket.sendto(raw(ack), self._serv_info)
                self._seq_num += 1
                self._print(ack, self._serv_info[1], SEND_MODE)
#############################################################################################
def main(argv):
    """Entry point: currently a demo of RUSH packet encode/decode.

    The networking client path is still disabled (it was commented out);
    this function only allocates two ports and round-trips packets through
    raw() / raw_packet_decode().

    Cleanup: removed the large blocks of commented-out experimental code,
    the unused local shadow of LOCAL_ADDR_INFO, and the trailing `pass`.
    """
    # No command-line arguments are accepted.
    if len(argv) >= 2:
        print("Usage: python3 assign2.py")
        return
    my_port = free_port()
    serv_port = free_port()
    print("my port:", my_port, "serv port:", serv_port)
    # Demo 1: decode a captured raw packet: seq=2, ack=1, flag byte 0x90,
    # empty zero-padded payload. Equivalent to the original inline
    # 1472-byte literal (6-byte header + PAYLOAD_SIZE zero bytes).
    raw_in = b'\x00\x02\x00\x01\x90\x00' + b'\x00' * PAYLOAD_SIZE
    print(raw_packet_decode(raw_in))
    # Demo 2: encode a packet, show its raw bytes, then decode it back.
    tst = Packet(seq_num=1, ack_flag=1, data='file.txt')
    raw_val = raw(tst)
    print(raw_val)
    print("Decoding", raw_packet_decode(raw_val))
# Run the demo only when this file is executed directly, not on import.
if __name__ == "__main__":
    main(sys.argv)
| 64.711538
| 5,904
| 0.667088
| 2,534
| 13,460
| 3.433702
| 0.080505
| 1.010918
| 1.515343
| 2.019078
| 0.630157
| 0.607057
| 0.593265
| 0.566257
| 0.557292
| 0.541892
| 0
| 0.259171
| 0.129123
| 13,460
| 208
| 5,905
| 64.711538
| 0.483109
| 0.1263
| 0
| 0.070866
| 0
| 0.007874
| 0.546048
| 0.512535
| 0
| 1
| 0
| 0
| 0.015748
| 1
| 0.11811
| false
| 0.007874
| 0.031496
| 0.015748
| 0.244094
| 0.125984
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
76f5a0b102fd0e16dd9343e88d07aa7fa886b5c7
| 92
|
py
|
Python
|
parameters_8080.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8080.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8080.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$bb52f3e9876e0730$0bb37c618f43b658beefac05844b4795d0311e45"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.516484
| 0.01087
| 92
| 1
| 92
| 92
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
6a689443e7a572d063847663310c8b58021ec566
| 35,353
|
py
|
Python
|
COMET/resources/svg_resources_rc.py
|
dallaval5u/COMET
|
8c5793faafe2797dd4100507aa0fe1e71cf9f6c0
|
[
"MIT"
] | null | null | null |
COMET/resources/svg_resources_rc.py
|
dallaval5u/COMET
|
8c5793faafe2797dd4100507aa0fe1e71cf9f6c0
|
[
"MIT"
] | null | null | null |
COMET/resources/svg_resources_rc.py
|
dallaval5u/COMET
|
8c5793faafe2797dd4100507aa0fe1e71cf9f6c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x06\xf5\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\
\x6c\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\
\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\
\x6e\x74\x73\x2f\x31\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\
\x65\x61\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\
\x67\x2f\x6e\x73\x23\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\
\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\
\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\
\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\
\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\
\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\
\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\
\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\
\x69\x2d\x30\x2e\x64\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\
\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\
\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\
\x68\x3d\x22\x32\x34\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\
\x3d\x22\x32\x34\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\
\x3d\x22\x30\x20\x30\x20\x32\x34\x20\x32\x34\x22\x0a\x20\x20\x20\
\x66\x69\x6c\x6c\x3d\x22\x6e\x6f\x6e\x65\x22\x0a\x20\x20\x20\x73\
\x74\x72\x6f\x6b\x65\x3d\x22\x23\x30\x30\x30\x22\x0a\x20\x20\x20\
\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3d\x22\x32\x22\
\x0a\x20\x20\x20\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\
\x61\x70\x3d\x22\x72\x6f\x75\x6e\x64\x22\x0a\x20\x20\x20\x73\x74\
\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x6a\x6f\x69\x6e\x3d\x22\x72\
\x6f\x75\x6e\x64\x22\x0a\x20\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\x69\x64\x3d\x22\x73\x76\
\x67\x34\x22\x0a\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\
\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x63\x68\x65\x76\x72\x6f\x6e\
\x5f\x75\x70\x2e\x73\x76\x67\x22\x0a\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\
\x39\x32\x2e\x31\x20\x72\x31\x35\x33\x37\x31\x22\x3e\x0a\x20\x20\
\x3c\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x31\x30\x22\x3e\x0a\
\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\x20\x20\
\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\x74\x3d\
\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\
\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\x76\x67\
\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x79\x70\
\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\
\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x64\x63\
\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\x61\x67\
\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\
\x63\x3a\x74\x69\x74\x6c\x65\x3e\x3c\x2f\x64\x63\x3a\x74\x69\x74\
\x6c\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\
\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\
\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\x61\
\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x64\x65\x66\x73\x38\x22\x20\x2f\x3e\x0a\x20\x20\x3c\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\
\x65\x77\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\x6f\
\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\x20\x20\
\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\x23\x36\
\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x22\x0a\x20\x20\
\x20\x20\x20\x6f\x62\x6a\x65\x63\x74\x74\x6f\x6c\x65\x72\x61\x6e\
\x63\x65\x3d\x22\x31\x30\x22\x0a\x20\x20\x20\x20\x20\x67\x72\x69\
\x64\x74\x6f\x6c\x65\x72\x61\x6e\x63\x65\x3d\x22\x31\x30\x22\x0a\
\x20\x20\x20\x20\x20\x67\x75\x69\x64\x65\x74\x6f\x6c\x65\x72\x61\
\x6e\x63\x65\x3d\x22\x31\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\
\x74\x79\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\
\x22\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\x3d\x22\
\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\x67\x68\
\x74\x3d\x22\x31\x30\x30\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x6e\x61\x6d\x65\x64\x76\x69\x65\x77\x36\x22\x0a\x20\x20\
\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\x61\x6c\
\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x33\x2e\x39\x30\x36\x34\x33\
\x33\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x78\x3d\x22\x2d\x33\x2e\x38\x33\x30\x30\x33\x38\x31\x22\
\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\
\x79\x3d\x22\x36\x2e\x37\x32\x38\x38\x32\x34\x37\x22\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\
\x6f\x77\x2d\x78\x3d\x22\x2d\x39\x22\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\
\x3d\x22\x2d\x39\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\
\x69\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\x6c\
\x61\x79\x65\x72\x3d\x22\x73\x76\x67\x34\x22\x20\x2f\x3e\x0a\x20\
\x20\x3c\x70\x6f\x6c\x79\x6c\x69\x6e\x65\x0a\x20\x20\x20\x20\x20\
\x70\x6f\x69\x6e\x74\x73\x3d\x22\x31\x38\x20\x31\x35\x20\x31\x32\
\x20\x39\x20\x36\x20\x31\x35\x22\x0a\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x70\x6f\x6c\x79\x6c\x69\x6e\x65\x32\x22\x0a\x20\x20\x20\
\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\x6e\x6f\
\x6e\x65\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\
\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x23\x62\x37\x30\x30\x30\x30\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\
\x31\x22\x0a\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\x6f\x72\
\x6d\x3d\x22\x6d\x61\x74\x72\x69\x78\x28\x31\x2e\x37\x30\x33\x36\
\x39\x38\x32\x2c\x30\x2c\x30\x2c\x31\x2e\x37\x30\x33\x36\x39\x38\
\x32\x2c\x2d\x38\x2e\x34\x34\x33\x35\x38\x36\x37\x2c\x2d\x38\x2e\
\x37\x33\x36\x36\x31\x37\x36\x29\x22\x20\x2f\x3e\x0a\x3c\x2f\x73\
\x76\x67\x3e\x0a\
\x00\x00\x08\x46\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0d\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\
\x74\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\
\x65\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\
\x0d\x0a\x0d\x0a\x3c\x73\x76\x67\x0d\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\
\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\
\x74\x73\x2f\x31\x2e\x31\x2f\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\
\x65\x61\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\
\x67\x2f\x6e\x73\x23\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\
\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\
\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\
\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\
\x23\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\
\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0d\x0a\x20\
\x20\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\
\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\
\x73\x76\x67\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\x66\
\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\x74\x64\x22\x0d\x0a\x20\x20\
\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\x61\
\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\x70\x65\x22\x0d\x0a\x20\
\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x36\x34\x22\x0d\x0a\x20\x20\
\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x36\x34\x22\x0d\x0a\x20\x20\
\x20\x69\x64\x3d\x22\x73\x76\x67\x34\x37\x30\x30\x22\x0d\x0a\x20\
\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0d\
\x0a\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\
\x73\x69\x6f\x6e\x3d\x22\x30\x2e\x39\x31\x20\x72\x31\x33\x37\x32\
\x35\x22\x0d\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
\x30\x20\x30\x20\x36\x34\x20\x36\x34\x22\x0d\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x73\x74\x6f\x70\x2e\x73\x76\x67\x22\x3e\x0d\x0a\x20\x20\x3c\
\x64\x65\x66\x73\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x64\
\x65\x66\x73\x34\x37\x30\x32\x22\x20\x2f\x3e\x0d\x0a\x20\x20\x3c\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\x69\
\x65\x77\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\x73\
\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\x6c\
\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0d\x0a\x20\x20\
\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\
\x23\x36\x36\x36\x36\x36\x36\x22\x0d\x0a\x20\x20\x20\x20\x20\x62\
\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x2e\
\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x30\
\x2e\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\x32\
\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x31\x22\x0d\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\x22\x32\x33\
\x2e\x33\x36\x37\x32\x30\x36\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\x33\x30\x2e\x37\
\x30\x37\x39\x31\x39\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\x6c\x61\
\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0d\x0a\x20\x20\
\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x74\x72\x75\
\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\x75\x6e\x69\x74\x73\
\x3d\x22\x70\x78\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\x22\
\x74\x72\x75\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\
\x68\x3d\x22\x31\x39\x32\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\
\x65\x69\x67\x68\x74\x3d\x22\x31\x30\x33\x30\x22\x0d\x0a\x20\x20\
\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\
\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x79\
\x3d\x22\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\x6d\
\x69\x7a\x65\x64\x3d\x22\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6f\x62\x6a\x65\x63\x74\x2d\x6e\
\x6f\x64\x65\x73\x3d\x22\x74\x72\x75\x65\x22\x3e\x0d\x0a\x20\x20\
\x20\x20\x3c\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\
\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x74\x79\x70\x65\x3d\x22\x78\
\x79\x67\x72\x69\x64\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x67\x72\x69\x64\x35\x32\x34\x38\x22\x20\x2f\x3e\x0d\
\x0a\x20\x20\x3c\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\
\x6d\x65\x64\x76\x69\x65\x77\x3e\x0d\x0a\x20\x20\x3c\x6d\x65\x74\
\x61\x64\x61\x74\x61\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x6d\x65\x74\x61\x64\x61\x74\x61\x34\x37\x30\x35\x22\x3e\x0d\x0a\
\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0d\x0a\x20\
\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0d\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\
\x74\x3d\x22\x22\x3e\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\
\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\
\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\
\x61\x74\x3e\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\
\x3a\x74\x79\x70\x65\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\
\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\
\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0d\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x20\x2f\x3e\
\x0d\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\x72\
\x6b\x3e\x0d\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\
\x46\x3e\x0d\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\x61\
\x3e\x0d\x0a\x20\x20\x3c\x67\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\
\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\x22\
\x4c\x61\x79\x65\x72\x20\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\x64\
\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x3e\x0d\x0a\x20\x20\x20\x20\
\x3c\x72\x65\x63\x74\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\
\x79\x6c\x65\x3d\x22\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x66\
\x69\x6c\x6c\x3a\x23\x64\x34\x30\x30\x30\x30\x3b\x66\x69\x6c\x6c\
\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x66\x69\x6c\x6c\x2d\
\x72\x75\x6c\x65\x3a\x6e\x6f\x6e\x7a\x65\x72\x6f\x3b\x73\x74\x72\
\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\
\x77\x69\x64\x74\x68\x3a\x30\x2e\x38\x30\x30\x30\x30\x30\x30\x31\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\x61\x70\x3a\
\x72\x6f\x75\x6e\x64\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\
\x65\x6a\x6f\x69\x6e\x3a\x72\x6f\x75\x6e\x64\x3b\x73\x74\x72\x6f\
\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\x79\
\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\
\x63\x69\x74\x79\x3a\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x72\x65\x63\x74\x35\x32\x35\x30\x22\x0d\x0a\x20\
\x20\x20\x20\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x32\x38\x22\
\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\
\x22\x32\x38\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x78\x3d\x22\
\x31\x38\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x79\x3d\x22\x31\
\x38\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x79\x3d\x22\x30\
\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x72\x78\x3d\x22\x30\x22\
\x20\x2f\x3e\x0d\x0a\x20\x20\x3c\x2f\x67\x3e\x0d\x0a\x3c\x2f\x73\
\x76\x67\x3e\x0d\x0a\
\x00\x00\x08\x51\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0d\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\
\x74\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\
\x65\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\
\x0d\x0a\x0d\x0a\x3c\x73\x76\x67\x0d\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\
\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\x6e\
\x74\x73\x2f\x31\x2e\x31\x2f\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\
\x65\x61\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\
\x67\x2f\x6e\x73\x23\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\
\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\
\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\
\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\
\x23\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\
\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0d\x0a\x20\
\x20\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\
\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\
\x73\x76\x67\x22\x0d\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\x66\
\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\x74\x64\x22\x0d\x0a\x20\x20\
\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\x61\
\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\x70\x65\x22\x0d\x0a\x20\
\x20\x20\x77\x69\x64\x74\x68\x3d\x22\x36\x34\x22\x0d\x0a\x20\x20\
\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x36\x34\x22\x0d\x0a\x20\x20\
\x20\x69\x64\x3d\x22\x73\x76\x67\x34\x37\x30\x30\x22\x0d\x0a\x20\
\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0d\
\x0a\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\
\x73\x69\x6f\x6e\x3d\x22\x30\x2e\x39\x31\x20\x72\x31\x33\x37\x32\
\x35\x22\x0d\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
\x30\x20\x30\x20\x36\x34\x20\x36\x34\x22\x0d\x0a\x20\x20\x20\x73\
\x6f\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\
\x22\x73\x74\x61\x72\x74\x2e\x73\x76\x67\x22\x3e\x0d\x0a\x20\x20\
\x3c\x64\x65\x66\x73\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\
\x64\x65\x66\x73\x34\x37\x30\x32\x22\x20\x2f\x3e\x0d\x0a\x20\x20\
\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\x76\
\x69\x65\x77\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x62\x61\
\x73\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\
\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0d\x0a\x20\
\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\
\x22\x23\x36\x36\x36\x36\x36\x36\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x62\x6f\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\
\x2e\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\
\x30\x2e\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\x77\x3d\x22\
\x32\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x31\x22\x0d\x0a\x20\x20\x20\
\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x78\x3d\x22\x39\
\x2e\x34\x37\x34\x36\x37\x36\x38\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x79\x3d\x22\x32\x38\x2e\
\x32\x35\x35\x34\x34\x37\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\x2d\x6c\
\x61\x79\x65\x72\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0d\x0a\x20\
\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x74\x72\
\x75\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x64\x6f\x63\x75\x6d\x65\x6e\x74\x2d\x75\x6e\x69\x74\
\x73\x3d\x22\x70\x78\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x67\x72\x69\x64\x2d\x62\x62\x6f\x78\x3d\
\x22\x74\x72\x75\x65\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\
\x74\x68\x3d\x22\x31\x39\x32\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x30\x33\x30\x22\x0d\x0a\x20\
\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\
\x64\x6f\x77\x2d\x78\x3d\x22\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\
\x79\x3d\x22\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\x69\
\x6d\x69\x7a\x65\x64\x3d\x22\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6f\x62\x6a\x65\x63\x74\x2d\
\x6e\x6f\x64\x65\x73\x3d\x22\x74\x72\x75\x65\x22\x3e\x0d\x0a\x20\
\x20\x20\x20\x3c\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x69\
\x64\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x74\x79\x70\x65\x3d\x22\
\x78\x79\x67\x72\x69\x64\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x67\x72\x69\x64\x35\x32\x34\x38\x22\x20\x2f\x3e\
\x0d\x0a\x20\x20\x3c\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\
\x61\x6d\x65\x64\x76\x69\x65\x77\x3e\x0d\x0a\x20\x20\x3c\x6d\x65\
\x74\x61\x64\x61\x74\x61\x0d\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\
\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x34\x37\x30\x35\x22\x3e\x0d\
\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0d\x0a\
\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0d\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\
\x75\x74\x3d\x22\x22\x3e\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x3c\x64\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\
\x2f\x73\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\
\x6d\x61\x74\x3e\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\
\x63\x3a\x74\x79\x70\x65\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x72\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\
\x2f\x64\x63\x2f\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\
\x6c\x6c\x49\x6d\x61\x67\x65\x22\x20\x2f\x3e\x0d\x0a\x20\x20\x20\
\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x20\x2f\
\x3e\x0d\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\x3a\x57\x6f\
\x72\x6b\x3e\x0d\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\
\x44\x46\x3e\x0d\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\x74\
\x61\x3e\x0d\x0a\x20\x20\x3c\x67\x0d\x0a\x20\x20\x20\x20\x20\x69\
\x64\x3d\x22\x6c\x61\x79\x65\x72\x31\x22\x0d\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x6c\x61\x62\x65\x6c\x3d\
\x22\x4c\x61\x79\x65\x72\x20\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x67\x72\x6f\x75\x70\x6d\x6f\
\x64\x65\x3d\x22\x6c\x61\x79\x65\x72\x22\x3e\x0d\x0a\x20\x20\x20\
\x20\x3c\x70\x61\x74\x68\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x73\
\x74\x79\x6c\x65\x3d\x22\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\
\x66\x69\x6c\x6c\x3a\x23\x30\x30\x62\x37\x30\x30\x3b\x66\x69\x6c\
\x6c\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x31\x3b\x66\x69\x6c\x6c\
\x2d\x72\x75\x6c\x65\x3a\x6e\x6f\x6e\x7a\x65\x72\x6f\x3b\x73\x74\
\x72\x6f\x6b\x65\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\
\x2d\x77\x69\x64\x74\x68\x3a\x30\x2e\x38\x30\x30\x30\x30\x30\x30\
\x31\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\x61\x70\
\x3a\x72\x6f\x75\x6e\x64\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\
\x6e\x65\x6a\x6f\x69\x6e\x3a\x72\x6f\x75\x6e\x64\x3b\x73\x74\x72\
\x6f\x6b\x65\x2d\x6d\x69\x74\x65\x72\x6c\x69\x6d\x69\x74\x3a\x34\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x64\x61\x73\x68\x61\x72\x72\x61\
\x79\x3a\x6e\x6f\x6e\x65\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\
\x61\x63\x69\x74\x79\x3a\x31\x22\x0d\x0a\x20\x20\x20\x20\x20\x20\
\x20\x64\x3d\x22\x4d\x20\x31\x38\x2c\x31\x35\x20\x34\x36\x2c\x33\
\x32\x20\x31\x38\x2c\x34\x39\x20\x5a\x22\x0d\x0a\x20\x20\x20\x20\
\x20\x20\x20\x69\x64\x3d\x22\x72\x65\x63\x74\x34\x31\x33\x35\x22\
\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x3a\x63\x6f\x6e\x6e\x65\x63\x74\x6f\x72\x2d\x63\x75\x72\x76\
\x61\x74\x75\x72\x65\x3d\x22\x30\x22\x0d\x0a\x20\x20\x20\x20\x20\
\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x6f\x64\x65\x74\
\x79\x70\x65\x73\x3d\x22\x63\x63\x63\x63\x22\x20\x2f\x3e\x0d\x0a\
\x20\x20\x3c\x2f\x67\x3e\x0d\x0a\x3c\x2f\x73\x76\x67\x3e\x0d\x0a\
\
\x00\x00\x06\xf7\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\
\x6c\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x70\
\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\x6c\x65\x6d\x65\
\x6e\x74\x73\x2f\x31\x2e\x31\x2f\x22\x0a\x20\x20\x20\x78\x6d\x6c\
\x6e\x73\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\
\x65\x61\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\
\x67\x2f\x6e\x73\x23\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\
\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\
\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\
\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x76\x67\x3d\x22\
\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\
\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\x20\x20\x78\
\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\
\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\
\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x73\x6f\x64\x69\
\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\x66\x6f\x72\x67\x65\
\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\x64\x69\x70\x6f\x64\
\x69\x2d\x30\x2e\x64\x74\x64\x22\x0a\x20\x20\x20\x78\x6d\x6c\x6e\
\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\
\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\x61\x63\x65\x73\x2f\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x22\x0a\x20\x20\x20\x77\x69\x64\x74\
\x68\x3d\x22\x32\x34\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\
\x3d\x22\x32\x34\x22\x0a\x20\x20\x20\x76\x69\x65\x77\x42\x6f\x78\
\x3d\x22\x30\x20\x30\x20\x32\x34\x20\x32\x34\x22\x0a\x20\x20\x20\
\x66\x69\x6c\x6c\x3d\x22\x6e\x6f\x6e\x65\x22\x0a\x20\x20\x20\x73\
\x74\x72\x6f\x6b\x65\x3d\x22\x23\x30\x30\x30\x22\x0a\x20\x20\x20\
\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3d\x22\x32\x22\
\x0a\x20\x20\x20\x73\x74\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x63\
\x61\x70\x3d\x22\x72\x6f\x75\x6e\x64\x22\x0a\x20\x20\x20\x73\x74\
\x72\x6f\x6b\x65\x2d\x6c\x69\x6e\x65\x6a\x6f\x69\x6e\x3d\x22\x72\
\x6f\x75\x6e\x64\x22\x0a\x20\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\
\x3d\x22\x31\x2e\x31\x22\x0a\x20\x20\x20\x69\x64\x3d\x22\x73\x76\
\x67\x34\x22\x0a\x20\x20\x20\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\
\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x63\x68\x65\x76\x72\x6f\x6e\
\x2d\x64\x6f\x77\x6e\x2e\x73\x76\x67\x22\x0a\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x30\x2e\x39\x32\x2e\x31\x20\x72\x31\x35\x33\x37\x31\x22\x3e\x0a\
\x20\x20\x3c\x6d\x65\x74\x61\x64\x61\x74\x61\x0a\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x6d\x65\x74\x61\x64\x61\x74\x61\x31\x30\x22\
\x3e\x0a\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\
\x20\x20\x20\x20\x20\x20\x3c\x63\x63\x3a\x57\x6f\x72\x6b\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\
\x74\x3d\x22\x22\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\
\x63\x3a\x66\x6f\x72\x6d\x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x73\
\x76\x67\x2b\x78\x6d\x6c\x3c\x2f\x64\x63\x3a\x66\x6f\x72\x6d\x61\
\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x74\
\x79\x70\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\
\x64\x66\x3a\x72\x65\x73\x6f\x75\x72\x63\x65\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\
\x64\x63\x6d\x69\x74\x79\x70\x65\x2f\x53\x74\x69\x6c\x6c\x49\x6d\
\x61\x67\x65\x22\x20\x2f\x3e\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x3c\x64\x63\x3a\x74\x69\x74\x6c\x65\x3e\x3c\x2f\x64\x63\x3a\x74\
\x69\x74\x6c\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x2f\x63\x63\
\x3a\x57\x6f\x72\x6b\x3e\x0a\x20\x20\x20\x20\x3c\x2f\x72\x64\x66\
\x3a\x52\x44\x46\x3e\x0a\x20\x20\x3c\x2f\x6d\x65\x74\x61\x64\x61\
\x74\x61\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x64\x65\x66\x73\x38\x22\x20\x2f\x3e\x0a\x20\
\x20\x3c\x73\x6f\x64\x69\x70\x6f\x64\x69\x3a\x6e\x61\x6d\x65\x64\
\x76\x69\x65\x77\x0a\x20\x20\x20\x20\x20\x70\x61\x67\x65\x63\x6f\
\x6c\x6f\x72\x3d\x22\x23\x66\x66\x66\x66\x66\x66\x22\x0a\x20\x20\
\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x63\x6f\x6c\x6f\x72\x3d\x22\
\x23\x36\x36\x36\x36\x36\x36\x22\x0a\x20\x20\x20\x20\x20\x62\x6f\
\x72\x64\x65\x72\x6f\x70\x61\x63\x69\x74\x79\x3d\x22\x31\x22\x0a\
\x20\x20\x20\x20\x20\x6f\x62\x6a\x65\x63\x74\x74\x6f\x6c\x65\x72\
\x61\x6e\x63\x65\x3d\x22\x31\x30\x22\x0a\x20\x20\x20\x20\x20\x67\
\x72\x69\x64\x74\x6f\x6c\x65\x72\x61\x6e\x63\x65\x3d\x22\x31\x30\
\x22\x0a\x20\x20\x20\x20\x20\x67\x75\x69\x64\x65\x74\x6f\x6c\x65\
\x72\x61\x6e\x63\x65\x3d\x22\x31\x30\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x6f\x70\x61\
\x63\x69\x74\x79\x3d\x22\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x3a\x70\x61\x67\x65\x73\x68\x61\x64\x6f\
\x77\x3d\x22\x32\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x77\x69\x64\x74\x68\
\x3d\x22\x31\x39\x32\x30\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x68\x65\x69\
\x67\x68\x74\x3d\x22\x31\x30\x30\x31\x22\x0a\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x6e\x61\x6d\x65\x64\x76\x69\x65\x77\x36\x22\x0a\
\x20\x20\x20\x20\x20\x73\x68\x6f\x77\x67\x72\x69\x64\x3d\x22\x66\
\x61\x6c\x73\x65\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\
\x61\x70\x65\x3a\x7a\x6f\x6f\x6d\x3d\x22\x31\x33\x2e\x39\x30\x36\
\x34\x33\x33\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x3a\x63\x78\x3d\x22\x2d\x33\x2e\x38\x33\x30\x30\x33\x38\
\x31\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3a\x63\x79\x3d\x22\x36\x2e\x37\x32\x38\x38\x32\x34\x37\x22\x0a\
\x20\x20\x20\x20\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\
\x6e\x64\x6f\x77\x2d\x78\x3d\x22\x2d\x39\x22\x0a\x20\x20\x20\x20\
\x20\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\
\x2d\x79\x3d\x22\x2d\x39\x22\x0a\x20\x20\x20\x20\x20\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x3a\x77\x69\x6e\x64\x6f\x77\x2d\x6d\x61\x78\
\x69\x6d\x69\x7a\x65\x64\x3d\x22\x31\x22\x0a\x20\x20\x20\x20\x20\
\x69\x6e\x6b\x73\x63\x61\x70\x65\x3a\x63\x75\x72\x72\x65\x6e\x74\
\x2d\x6c\x61\x79\x65\x72\x3d\x22\x73\x76\x67\x34\x22\x20\x2f\x3e\
\x0a\x20\x20\x3c\x70\x6f\x6c\x79\x6c\x69\x6e\x65\x0a\x20\x20\x20\
\x20\x20\x70\x6f\x69\x6e\x74\x73\x3d\x22\x31\x38\x20\x31\x35\x20\
\x31\x32\x20\x39\x20\x36\x20\x31\x35\x22\x0a\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x70\x6f\x6c\x79\x6c\x69\x6e\x65\x32\x22\x0a\x20\
\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\x6c\x6c\x3a\
\x6e\x6f\x6e\x65\x3b\x66\x69\x6c\x6c\x2d\x6f\x70\x61\x63\x69\x74\
\x79\x3a\x31\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x23\x32\x31\x62\x37\
\x30\x30\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\
\x79\x3a\x31\x22\x0a\x20\x20\x20\x20\x20\x74\x72\x61\x6e\x73\x66\
\x6f\x72\x6d\x3d\x22\x6d\x61\x74\x72\x69\x78\x28\x31\x2e\x37\x30\
\x33\x36\x39\x38\x32\x2c\x30\x2c\x30\x2c\x2d\x31\x2e\x37\x30\x33\
\x36\x39\x38\x32\x2c\x2d\x38\x2e\x34\x34\x33\x35\x38\x36\x37\x2c\
\x33\x32\x2e\x31\x36\x39\x34\x31\x38\x29\x22\x20\x2f\x3e\x0a\x3c\
\x2f\x73\x76\x67\x3e\x0a\
"
qt_resource_name = b"\
\x00\x0c\
\x04\x81\x11\xfe\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x76\x00\x72\x00\x6f\x00\x6e\
\x00\x04\
\x00\x07\x72\x89\
\x00\x70\
\x00\x6c\x00\x61\x00\x79\
\x00\x04\
\x00\x07\xab\x60\
\x00\x73\
\x00\x74\x00\x6f\x00\x70\
\x00\x0a\
\x09\xe3\x55\xfe\
\x00\x75\
\x00\x70\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x76\x00\x72\x00\x6f\x00\x6e\
\x00\x0e\
\x0b\x58\x6c\xa7\
\x00\x63\
\x00\x68\x00\x65\x00\x76\x00\x72\x00\x6f\x00\x6e\x00\x5f\x00\x75\x00\x70\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x08\
\x0b\x63\x55\x87\
\x00\x73\
\x00\x74\x00\x6f\x00\x70\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x09\
\x08\x97\xaf\x87\
\x00\x73\
\x00\x74\x00\x61\x00\x72\x00\x74\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x10\
\x0e\x17\x2a\x87\
\x00\x63\
\x00\x68\x00\x65\x00\x76\x00\x72\x00\x6f\x00\x6e\x00\x5f\x00\x64\x00\x6f\x00\x77\x00\x6e\x00\x2e\x00\x73\x00\x76\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\
\x00\x00\x00\x1e\x00\x02\x00\x00\x00\x01\x00\x00\x00\x08\
\x00\x00\x00\x2c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x07\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x3a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x17\x98\
\x00\x00\x00\x76\x00\x00\x00\x00\x00\x01\x00\x00\x06\xf9\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0f\x43\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x1e\x00\x02\x00\x00\x00\x01\x00\x00\x00\x08\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x2c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x07\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x3a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x69\xc9\x81\x48\xec\
\x00\x00\x00\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x17\x98\
\x00\x00\x01\x69\xc8\xe3\xb6\x92\
\x00\x00\x00\x76\x00\x00\x00\x00\x00\x01\x00\x00\x06\xf9\
\x00\x00\x01\x69\xc4\x3a\xfd\xd0\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0f\x43\
\x00\x00\x01\x69\xc4\x3a\xfd\xcb\
"
# Pick the resource-struct format matching the runtime Qt version.
# Compare numeric component lists, not strings: a lexicographic string
# comparison ranks "10" < "8", so Qt 5.10+ would wrongly fall back to
# the legacy (rcc v1) format. Newer pyrcc5 output uses the same int fix.
qt_version = [int(v) for v in QtCore.qVersion().split(".")]
if qt_version < [5, 8, 0]:
    # Qt < 5.8 only understands the version-1 resource structure.
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    # Qt >= 5.8 supports version 2 (adds last-modified timestamps).
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register this module's compiled resource data with the Qt runtime."""
    args = (rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
    QtCore.qRegisterResourceData(*args)
def qCleanupResources():
    """Unregister this module's resource data from the Qt runtime."""
    args = (rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
    QtCore.qUnregisterResourceData(*args)
# Register the resources as a side effect of importing this module.
qInitResources()
| 58.628524
| 121
| 0.725483
| 8,465
| 35,353
| 3.025399
| 0.019374
| 0.206638
| 0.223858
| 0.194924
| 0.959274
| 0.957282
| 0.953612
| 0.952089
| 0.951269
| 0.946349
| 0
| 0.411887
| 0.020083
| 35,353
| 602
| 122
| 58.725914
| 0.327368
| 0.004271
| 0
| 0.367067
| 0
| 0.876501
| 0.000114
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.003431
| false
| 0
| 0.001715
| 0
| 0.005146
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
6a7e50a53d1f785658e81e7ea1992e28ac663a2c
| 13,859
|
py
|
Python
|
internetradio/src/InternetRadioOledDisplay.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 1
|
2020-01-27T22:53:56.000Z
|
2020-01-27T22:53:56.000Z
|
internetradio/src/InternetRadioOledDisplay.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | null | null | null |
internetradio/src/InternetRadioOledDisplay.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 11
|
2015-02-26T20:59:14.000Z
|
2021-09-20T08:23:03.000Z
|
#
# InternetRadio E2
#
# Coded by Dr.Best (c) 2012
# Support: www.dreambox-tools.info
# E-Mail: dr.best@dreambox-tools.info
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Multimedia GmbH.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Multimedia GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Multimedia GmbH.
#
# If you want to use or modify the code or parts of it,
# you have to keep MY license and inform me about the modifications by mail.
#
from Screens.Screen import Screen
from Components.ProgressBar import ProgressBar
from Components.Pixmap import Pixmap
from Components.Label import Label
from InternetRadioVisualization import InternetRadioVisualization
class InternetRadioOledDisplay(Screen, InternetRadioVisualization):
    """Companion screen for the receiver's front display (OLED/LCD).

    Shows the plugin title, a scrolling text line and a 16-bar spectrum
    visualization.  Two skins are bundled: id="2" for a 96x64 OLED and
    id="1" for a 132x64 LCD.
    """
    # helper for skinning — this commented loop generated the per-bar
    # <widget> lines pasted into the skins below:
    # skincontent = ""
    # skincontent2 = ""
    # count = 16
    # x = 0
    # posx = 1
    # skinid = 0
    # while True:
    # skincontent += "<widget name=\"progress_%d\" zPosition=\"3\" position=\"%d,5\" size=\"5,50\" transparent=\"1\" orientation=\"orBottomToTop\" pixmap=\"/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png\" />\n" % (x,posx)
    # skincontent2 += "<widget name=\"top_%d\" position=\"%d,5\" zPosition=\"6\" size=\"5,2\" transparent=\"1\" pixmap=\"/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png\" />\n" % (x,posx)
    # posx += 6
    # x += 1
    # if x == count:
    # break
    skin = ("""
<screen name="InternetRadioOledDisplay" position="0,0" size="96,64" id="2">
<widget name="text1" position="4,0" size="96,14" font="Regular;12" halign="center" valign="center"/>
<widget name="text2" position="4,14" size="96,49" font="Regular;10" halign="center" valign="center"/>
<widget name="progress_0" zPosition="3" position="1,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_1" zPosition="3" position="7,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_2" zPosition="3" position="13,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_3" zPosition="3" position="19,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_4" zPosition="3" position="25,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_5" zPosition="3" position="31,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_6" zPosition="3" position="37,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_7" zPosition="3" position="43,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_8" zPosition="3" position="49,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_9" zPosition="3" position="55,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_10" zPosition="3" position="61,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_11" zPosition="3" position="67,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_12" zPosition="3" position="73,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_13" zPosition="3" position="79,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_14" zPosition="3" position="85,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="progress_15" zPosition="3" position="91,5" size="5,50" transparent="1" orientation="orBottomToTop" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled-fs8.png" />
<widget name="top_0" position="1,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_1" position="7,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_2" position="13,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_3" position="19,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_4" position="25,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_5" position="31,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_6" position="37,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_7" position="43,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_8" position="49,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_9" position="55,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_10" position="61,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_11" position="67,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_12" position="73,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_13" position="79,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_14" position="85,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
<widget name="top_15" position="91,5" zPosition="6" size="5,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/bar_oled_top-fs8.png" />
</screen>""","""
<screen name="InternetRadioOledDisplay" position="0,0" size="132,64" id="1">
<widget name="text1" position="4,0" size="132,14" font="Regular;12" halign="center" valign="center"/>
<widget name="text2" position="4,14" size="132,49" font="Regular;10" halign="center" valign="center"/>
<widget name="progress_0" zPosition="3" position="2,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_1" zPosition="3" position="10,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_2" zPosition="3" position="18,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_3" zPosition="3" position="26,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_4" zPosition="3" position="34,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_5" zPosition="3" position="42,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_6" zPosition="3" position="50,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_7" zPosition="3" position="58,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_8" zPosition="3" position="66,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_9" zPosition="3" position="74,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_10" zPosition="3" position="82,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_11" zPosition="3" position="90,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_12" zPosition="3" position="98,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_13" zPosition="3" position="106,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_14" zPosition="3" position="114,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="progress_15" zPosition="3" position="122,5" size="7,50" transparent="1" orientation="orBottomToTop" />
<widget name="top_0" position="2,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_1" position="10,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_2" position="18,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_3" position="26,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_4" position="34,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_5" position="42,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_6" position="50,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_7" position="58,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_8" position="66,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_9" position="74,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_10" position="82,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_11" position="90,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_12" position="98,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_13" position="106,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_14" position="114,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
<widget name="top_15" position="122,5" zPosition="6" size="7,2" transparent="1" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/InternetRadio/images/topvalue-lcd-fs8.png" />
</screen>""")

    def __init__(self, session, parent):
        """Build the labels and defer setup until the layout is finished."""
        Screen.__init__(self, session)
        InternetRadioVisualization.__init__(self)
        self["text1"] = Label(_("Internet Radio"))
        self["text2"] = Label("")
        # Visualization widgets only exist after the skin is applied.
        self.onLayoutFinish.append(self.startRun)

    def startRun(self):
        """Initialise the visualization and start with everything hidden."""
        # setProperties()/hideControls() come from InternetRadioVisualization
        # (not visible in this file) — presumably they wire up and hide the
        # progress_*/top_* bar widgets.
        self.setProperties()
        self.hideControls()
        self["text1"].hide()
        self["text2"].hide()

    def setText(self, text):
        """Show *text* on the second (status) line."""
        self["text2"].setText(text)

    def setLabelVisibility(self,value):
        """Toggle between text mode (labels shown, bars hidden) and bar mode."""
        if value:
            self["text1"].show()
            self["text2"].show()
            self.hideControls()
        else:
            self["text1"].hide()
            self["text2"].hide()
| 94.278912
| 253
| 0.735551
| 2,009
| 13,859
| 5.009457
| 0.095072
| 0.069555
| 0.059618
| 0.094396
| 0.866753
| 0.857313
| 0.852146
| 0.750994
| 0.750994
| 0.741653
| 0
| 0.059527
| 0.082401
| 13,859
| 146
| 254
| 94.924658
| 0.731855
| 0.097265
| 0
| 0.06
| 0
| 0.7
| 0.929614
| 0.426968
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.05
| 0
| 0.11
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6af074d2754f1959922f4345aca7f4b8b10b165a
| 356
|
py
|
Python
|
evalutils/__init__.py
|
coendevente/evalutils
|
eb22da85ab27819a6da1249fe5111c7e36185369
|
[
"MIT"
] | null | null | null |
evalutils/__init__.py
|
coendevente/evalutils
|
eb22da85ab27819a6da1249fe5111c7e36185369
|
[
"MIT"
] | null | null | null |
evalutils/__init__.py
|
coendevente/evalutils
|
eb22da85ab27819a6da1249fe5111c7e36185369
|
[
"MIT"
] | null | null | null |
from .evalutils import (
ClassificationAlgorithm,
ClassificationEvaluation,
DetectionAlgorithm,
DetectionEvaluation,
Evaluation,
SegmentationAlgorithm,
)
# Public API of the package; keep in sync with the import list above.
__all__ = [
    "ClassificationAlgorithm",
    "ClassificationEvaluation",
    "DetectionAlgorithm",
    "DetectionEvaluation",
    "Evaluation",
    "SegmentationAlgorithm",
]
| 19.777778
| 31
| 0.719101
| 16
| 356
| 15.75
| 0.625
| 0.373016
| 0.515873
| 0.666667
| 0.912698
| 0.912698
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196629
| 356
| 17
| 32
| 20.941176
| 0.881119
| 0
| 0
| 0
| 0
| 0
| 0.323034
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0aab525284d67792ecdf98ec041e5ccbbaceb15b
| 24,495
|
py
|
Python
|
tests/test_dhis.py
|
rileyhazard/smartva-dhis2
|
04a6326bbffb4050c3af33a66b2c183af33c7069
|
[
"MIT"
] | 3
|
2018-06-14T03:30:12.000Z
|
2018-07-23T06:39:20.000Z
|
tests/test_dhis.py
|
rileyhazard/smartva-dhis2
|
04a6326bbffb4050c3af33a66b2c183af33c7069
|
[
"MIT"
] | 17
|
2018-04-23T12:48:32.000Z
|
2018-12-10T18:14:09.000Z
|
tests/test_dhis.py
|
rileyhazard/smartva-dhis2
|
04a6326bbffb4050c3af33a66b2c183af33c7069
|
[
"MIT"
] | 6
|
2018-05-09T03:51:20.000Z
|
2020-11-10T08:24:53.000Z
|
import pytest
from smartvadhis2.core.dhis import RaiseImportFailure, raise_if_duplicate, Dhis
from smartvadhis2.core.exceptions.errors import *
from smartvadhis2.core.exceptions.base import DhisApiException
def test_import_orgunit_invalid():
    """An event pointing at an unknown org unit raises OrgunitNotValidImportError."""
    # Default importOptions block exactly as DHIS2 2.28 returns it
    # (insertion order preserved).
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))
    response = {
        "httpStatus": "Conflict",
        "httpStatusCode": 409,
        "status": "ERROR",
        "message": "An error occurred, please check import summary.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "ERROR",
            "imported": 0,
            "updated": 0,
            "deleted": 0,
            "ignored": 1,
            "importOptions": opts,
            "importSummaries": [
                {
                    "responseType": "ImportSummary",
                    "status": "ERROR",
                    "importOptions": dict(opts),
                    "description": "Event.orgUnit does not point to a valid organisation unit: tbd",
                    "importCount": {"imported": 0, "updated": 0,
                                    "ignored": 1, "deleted": 0},
                }
            ],
        },
    }
    with pytest.raises(OrgunitNotValidImportError):
        RaiseImportFailure(response)
def test_import_program_invalid():
    """An event referencing an unknown program raises ProgramNotValidError."""
    # Default importOptions block exactly as DHIS2 2.28 returns it
    # (insertion order preserved).
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))
    response = {
        "httpStatus": "Conflict",
        "httpStatusCode": 409,
        "status": "ERROR",
        "message": "An error occurred, please check import summary.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "ERROR",
            "imported": 0,
            "updated": 0,
            "deleted": 0,
            "ignored": 1,
            "importOptions": opts,
            "importSummaries": [
                {
                    "responseType": "ImportSummary",
                    "status": "ERROR",
                    "importOptions": dict(opts),
                    "description": "Event.program does not point to a valid program: abc",
                    "importCount": {"imported": 0, "updated": 0,
                                    "ignored": 1, "deleted": 0},
                }
            ],
        },
    }
    with pytest.raises(ProgramNotValidError):
        RaiseImportFailure(response)
def test_import_success():
    """A fully successful import (2 imported, 4 updated) must not raise and
    must expose the summary counts as attributes on the result."""
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))

    def summary(imported, updated, ref):
        # One per-event ImportSummary entry as DHIS2 2.28 returns it.
        return {
            "responseType": "ImportSummary",
            "status": "SUCCESS",
            "importCount": {"imported": imported, "updated": updated,
                            "ignored": 0, "deleted": 0},
            "reference": ref,
            "href": "https://play.dhis2.org/2.28/api/events/" + ref,
        }

    response = {
        "httpStatus": "OK",
        "httpStatusCode": 200,
        "status": "OK",
        "message": "Import was successful.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "SUCCESS",
            "imported": 2,
            "updated": 4,
            "deleted": 0,
            "ignored": 0,
            "importOptions": opts,
            "importSummaries": [
                summary(2, 0, "IgEemKlf33z"),
                summary(0, 2, "onXW2DQHRGS"),
                summary(0, 2, "A7vnB73x5Xw"),
            ],
        },
    }
    import_status = RaiseImportFailure(response)
    assert import_status.status_code == 200
    assert import_status.imported == 2
    assert import_status.updated == 4
    assert import_status.ignored == 0
    # The original asserted `deleted == 0` twice (copy/paste); once suffices.
    assert import_status.deleted == 0
def test_import_conflict_dataelement_invalid():
    """A partial import with a dataElement conflict raises GenericImportError."""
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))

    def success(imported, updated, ref):
        return {
            "responseType": "ImportSummary",
            "status": "SUCCESS",
            "importCount": {"imported": imported, "updated": updated,
                            "ignored": 0, "deleted": 0},
            "reference": ref,
            "href": "https://play.dhis2.org/2.28/api/events/" + ref,
        }

    # The middle event only partially imported: one data value was ignored.
    warning = {
        "responseType": "ImportSummary",
        "status": "WARNING",
        "importCount": {"imported": 0, "updated": 1, "ignored": 1, "deleted": 0},
        "conflicts": [
            {"object": "dataElement",
             "value": "sWoqcoByYmE is not a valid data element"}
        ],
        "reference": "onXW2DQHRGS",
        "href": "https://play.dhis2.org/2.28/api/events/onXW2DQHRGS",
    }
    response = {
        "httpStatus": "Conflict",
        "httpStatusCode": 409,
        "status": "WARNING",
        "message": "One more conflicts encountered, please check import summary.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "WARNING",
            "imported": 2,
            "updated": 3,
            "deleted": 0,
            "ignored": 1,
            "importOptions": opts,
            "importSummaries": [
                success(2, 0, "iG9fBAyM71U"),
                warning,
                success(0, 2, "A7vnB73x5Xw"),
            ],
        },
    }
    with pytest.raises(GenericImportError):
        RaiseImportFailure(response)
def test_empty_post_throws():
    """A 200 response with zero counts and no importSummaries still raises."""
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))
    response = {
        "httpStatus": "OK",
        "httpStatusCode": 200,
        "status": "OK",
        "message": "Import was successful.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "SUCCESS",
            "imported": 0,
            "updated": 0,
            "deleted": 0,
            "ignored": 0,
            "importOptions": opts,
        },
    }
    with pytest.raises(GenericImportError):
        RaiseImportFailure(response)
def test_event_duplicate_found():
    """A result row for the study id must raise DuplicateEventImportError."""
    sid = "VA_12345678912345"
    # The analytics response declares one String column per event field.
    columns = ["event", "created", "lastUpdated", "storedBy", "completedBy",
               "completedDate", "eventDate", "dueDate", "orgUnit",
               "orgUnitName", "status", "longitude", "latitude",
               "programStage", "program", "attributeOptionCombo", "deleted"]
    headers = [{"name": c, "column": c, "type": "java.lang.String",
                "hidden": False, "meta": False} for c in columns]
    # One existing event row -> the import would be a duplicate.
    row = ["zLPwmHJVr09",
           "2018-04-19 07:43:30.068",
           "2018-04-19 07:43:30.078",
           "smartvadhis2_v0.0.1",
           "bao-admin",
           "2018-04-19 00:00:00.0",
           "2018-03-26 00:00:00.0",
           "2018-04-19 07:43:30.068",
           "MJ0S8In5PIQ",
           "Gournadi Upazila",
           "COMPLETED",
           "",
           "",
           "pQ8gaWKD3pi",
           "HPrJOsYuM1K",
           "HllvX50cXC0",
           "False"]
    response = {
        "headers": headers,
        "rows": [row],
        "width": 17,
        "height": 1,
    }
    with pytest.raises(DuplicateEventImportError):
        raise_if_duplicate(response, sid)
def test_event_no_duplicate():
    """An empty result set must not raise for the study id."""
    sid = "VA_12345678912345"
    # Same header layout as the duplicate case, but no rows returned.
    columns = ["event", "created", "lastUpdated", "storedBy", "completedBy",
               "completedDate", "eventDate", "dueDate", "orgUnit",
               "orgUnitName", "status", "longitude", "latitude",
               "programStage", "program", "attributeOptionCombo", "deleted"]
    headers = [{"name": c, "column": c, "type": "java.lang.String",
                "hidden": False, "meta": False} for c in columns]
    response = {
        "headers": headers,
        "rows": [],
        "width": 17,
        "height": 0,
    }
    raise_if_duplicate(response, sid)
def test_orgunit_not_assigned():
    """An org unit outside the program's assignment raises OrgUnitNotAssignedError."""
    opts = {"idSchemes": {}, "dryRun": False, "async": False,
            "importStrategy": "CREATE", "mergeMode": "REPLACE",
            "reportMode": "FULL"}
    opts.update((flag, False) for flag in (
        "skipExistingCheck", "sharing", "skipNotifications",
        "datasetAllowsPeriods", "strictPeriods",
        "strictCategoryOptionCombos", "strictAttributeOptionCombos",
        "strictOrganisationUnits", "requireCategoryOptionCombo",
        "requireAttributeOptionCombo"))
    response = {
        "httpStatus": "Conflict",
        "httpStatusCode": 409,
        "status": "ERROR",
        "message": "An error occurred, please check import summary.",
        "response": {
            "responseType": "ImportSummaries",
            "status": "ERROR",
            "imported": 0,
            "updated": 0,
            "deleted": 0,
            "ignored": 1,
            "importOptions": opts,
            "importSummaries": [
                {
                    "responseType": "ImportSummary",
                    "status": "ERROR",
                    "importOptions": dict(opts),
                    "description": "Program is not assigned to this organisation unit: MJ0S8In5PIQ",
                    "importCount": {"imported": 0, "updated": 0,
                                    "ignored": 1, "deleted": 0},
                }
            ],
        },
    }
    with pytest.raises(OrgUnitNotAssignedError):
        RaiseImportFailure(response)
def test_generic_import_error():
    """A payload that is not a dict at all (here: a set) raises GenericImportError."""
    with pytest.raises(GenericImportError):
        RaiseImportFailure({"Unknown response"})
def test_get_root_orgunit():
    """Exactly one organisation unit in the payload is returned as the root id."""
    payload = {"organisationUnits": [{"id": "ImspTQPwCqd"}]}
    assert Dhis._get_root_id(payload) == 'ImspTQPwCqd'
def test_get_root_orgunit_multiple():
    """More than one candidate root org unit is an API-level error."""
    payload = {
        "organisationUnits": [{"id": "ImspTQPwCqd"}, {"id": "lc3eMKXaEfw"}]
    }
    with pytest.raises(DhisApiException):
        Dhis._get_root_id(payload)
def test_get_root_orgunit_none():
    """An empty organisationUnits list is an API-level error."""
    payload = {
        "pager": {"page": 1, "pageCount": 1, "total": 1, "pageSize": 50},
        "organisationUnits": [],
    }
    with pytest.raises(DhisApiException):
        Dhis._get_root_id(payload)
| 32.747326
| 100
| 0.395142
| 1,386
| 24,495
| 6.937229
| 0.137085
| 0.028289
| 0.042434
| 0.063651
| 0.861258
| 0.845866
| 0.838794
| 0.821425
| 0.79376
| 0.778055
| 0
| 0.022393
| 0.485895
| 24,495
| 747
| 101
| 32.791165
| 0.741126
| 0
| 0
| 0.712482
| 0
| 0
| 0.305981
| 0.047397
| 0
| 0
| 0
| 0
| 0.009818
| 1
| 0.01683
| false
| 0
| 0.13324
| 0
| 0.15007
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c8fe0d63bbfb4e593b911dbf63d3e421d2ae5cd
| 7,397
|
py
|
Python
|
tests/regressiontests/utils/termcolors.py
|
Smarsh/django
|
ffb738e0f56027e16564a79b709cbf44596c2335
|
[
"BSD-3-Clause"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
tests/regressiontests/utils/termcolors.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
tests/regressiontests/utils/termcolors.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
from unittest import TestCase
from django.utils.termcolors import parse_color_setting, PALETTES, DEFAULT_PALETTE, LIGHT_PALETTE, DARK_PALETTE, NOCOLOR_PALETTE
class TermColorTests(TestCase):
    """Tests for parse_color_setting, which parses DJANGO_COLORS-style
    strings of the form ``palette;role=fg/bg,option,...``.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated
    alias (removed from unittest in Python 3.12) with identical behavior.
    """

    def test_empty_string(self):
        self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])

    def test_simple_palette(self):
        self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
        # 'nocolor' disables coloring entirely.
        self.assertEqual(parse_color_setting('nocolor'), None)

    def test_fg(self):
        self.assertEqual(parse_color_setting('error=green'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))

    def test_fg_bg(self):
        self.assertEqual(parse_color_setting('error=green/blue'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue'}))

    def test_fg_opts(self):
        self.assertEqual(parse_color_setting('error=green,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green,bold,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink', 'bold')}))

    def test_fg_bg_opts(self):
        self.assertEqual(parse_color_setting('error=green/blue,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green/blue,bold,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')}))

    def test_override_palette(self):
        # A role definition layers on top of the named base palette.
        self.assertEqual(parse_color_setting('light;error=green'),
                         dict(PALETTES[LIGHT_PALETTE],
                              ERROR={'fg': 'green'}))

    def test_override_nocolor(self):
        self.assertEqual(parse_color_setting('nocolor;error=green'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))

    def test_reverse_override(self):
        # A palette name after a role definition resets everything.
        self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])

    def test_multiple_roles(self):
        self.assertEqual(parse_color_setting('error=green;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'},
                              SQL_FIELD={'fg': 'blue'}))

    def test_override_with_multiple_roles(self):
        self.assertEqual(parse_color_setting('light;error=green;sql_field=blue'),
                         dict(PALETTES[LIGHT_PALETTE],
                              ERROR={'fg': 'green'},
                              SQL_FIELD={'fg': 'blue'}))

    def test_empty_definition(self):
        # Empty segments contribute nothing; all-empty input means nocolor.
        self.assertEqual(parse_color_setting(';'), None)
        self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting(';;;'), None)

    def test_empty_options(self):
        # Trailing/duplicate commas are ignored.
        self.assertEqual(parse_color_setting('error=green,'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,,,'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,,blink,,'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink',)}))

    def test_bad_palette(self):
        self.assertEqual(parse_color_setting('unknown'), None)

    def test_bad_role(self):
        # Unknown roles are dropped; if nothing valid remains, the result
        # is None (no coloring).
        self.assertEqual(parse_color_setting('unknown='), None)
        self.assertEqual(parse_color_setting('unknown=green'), None)
        self.assertEqual(parse_color_setting('unknown=green;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              SQL_FIELD={'fg': 'blue'}))

    def test_bad_color(self):
        # Invalid foregrounds void the role; invalid backgrounds and
        # extra '/'-separated junk are silently dropped.
        self.assertEqual(parse_color_setting('error='), None)
        self.assertEqual(parse_color_setting('error=;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              SQL_FIELD={'fg': 'blue'}))
        self.assertEqual(parse_color_setting('error=unknown'), None)
        self.assertEqual(parse_color_setting('error=unknown;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              SQL_FIELD={'fg': 'blue'}))
        self.assertEqual(parse_color_setting('error=green/unknown'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green/blue/something'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue'}))
        self.assertEqual(parse_color_setting('error=green/blue/something,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))

    def test_bad_option(self):
        # Unrecognized options are dropped; valid ones are kept.
        self.assertEqual(parse_color_setting('error=green,unknown'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,unknown,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink',)}))

    def test_role_case(self):
        # Role names are case-insensitive.
        self.assertEqual(parse_color_setting('ERROR=green'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('eRrOr=green'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))

    def test_color_case(self):
        # Color names are case-insensitive.
        self.assertEqual(parse_color_setting('error=GREEN'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=GREEN/BLUE'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue'}))
        self.assertEqual(parse_color_setting('error=gReEn'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=gReEn/bLuE'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'bg': 'blue'}))

    def test_opts_case(self):
        # Option names are case-insensitive.
        self.assertEqual(parse_color_setting('error=green,BLINK'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green,bLiNk'),
                         dict(PALETTES[NOCOLOR_PALETTE],
                              ERROR={'fg': 'green', 'opts': ('blink',)}))
| 49.313333
| 128
| 0.556442
| 719
| 7,397
| 5.479833
| 0.069541
| 0.109137
| 0.185533
| 0.277157
| 0.911675
| 0.902792
| 0.839848
| 0.786041
| 0.691117
| 0.629188
| 0
| 0
| 0.305529
| 7,397
| 149
| 129
| 49.644295
| 0.766985
| 0
| 0
| 0.464
| 0
| 0
| 0.134649
| 0.03907
| 0
| 0
| 0
| 0
| 0.336
| 1
| 0.16
| false
| 0
| 0.016
| 0
| 0.184
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7ce8471d3cad31b7187d410f4921260f284256a7
| 26,948
|
py
|
Python
|
integration/core/test_api.py
|
amy/catalog-service
|
31c601fc4c036a4da27fc4f7437d8503a08f415a
|
[
"Apache-2.0"
] | null | null | null |
integration/core/test_api.py
|
amy/catalog-service
|
31c601fc4c036a4da27fc4f7437d8503a08f415a
|
[
"Apache-2.0"
] | null | null | null |
integration/core/test_api.py
|
amy/catalog-service
|
31c601fc4c036a4da27fc4f7437d8503a08f415a
|
[
"Apache-2.0"
] | 1
|
2019-10-16T13:00:41.000Z
|
2019-10-16T13:00:41.000Z
|
import pytest
import cattle
import requests
import json
from wait_for import wait_for
def headers(environment_id):
    """Return the HTTP headers scoping an API call to one environment."""
    scoped = {
        'Accept': 'application/json',
        'x-api-project-id': environment_id,
    }
    return scoped


# Default environment, its headers, and the service base URL used below.
DEFAULT_ENV = 'e1'
DEFAULT_HEADERS = headers(DEFAULT_ENV)
BASE_URL = 'http://localhost:8088/v1-catalog/'
def create_catalog(name, url, branch=None, headers=DEFAULT_HEADERS):
    """Create a catalog through the REST API and verify it registered.

    Posts the new catalog, triggers a template refresh, and asserts that
    exactly one catalog was added and that new templates appeared.
    Returns the JSON body of the create response.
    """
    # Reuse the module-level BASE_URL instead of repeating the literal.
    client = cattle.from_env(url=BASE_URL + 'schemas', headers=headers)
    # Snapshot current state so the deltas can be verified afterwards.
    original_catalogs = client.list_catalog()
    assert len(original_catalogs) > 0
    original_templates = client.list_template()
    assert len(original_templates) > 0

    data = {
        'name': name,
        'url': url,
    }
    if branch:
        data['branch'] = branch
    response = requests.post(BASE_URL + 'catalogs',
                             data=json.dumps(data), headers=headers)
    assert response.status_code == 200
    resp = response.json()
    assert resp['name'] == name
    assert resp['url'] == url
    if branch:
        assert resp['branch'] == branch

    # A refresh is required before the new catalog's templates show up.
    response = requests.post(BASE_URL + 'templates?action=refresh',
                             headers=headers)
    assert response.status_code == 204

    templates = client.list_template()
    catalogs = client.list_catalog()
    assert len(catalogs) == len(original_catalogs) + 1
    assert len(templates) > len(original_templates)
    return resp
def create_duplicate_catalog(name, url, branch=None, headers=DEFAULT_HEADERS):
    """Attempt to create a catalog whose name already exists.

    The API must reject the duplicate with HTTP 422. Nothing is returned.
    """
    client = cattle.from_env(url=BASE_URL + 'schemas', headers=headers)
    # Sanity-check that the service is already populated before posting.
    assert len(client.list_catalog()) > 0
    assert len(client.list_template()) > 0

    data = {
        'name': name,
        'url': url,
    }
    if branch:
        data['branch'] = branch
    response = requests.post(BASE_URL + 'catalogs',
                             data=json.dumps(data), headers=headers)
    # 422 Unprocessable Entity: duplicate catalog name.
    assert response.status_code == 422
def delete_catalog(name, headers=DEFAULT_HEADERS):
    """Delete a catalog by name and verify catalogs/templates shrink."""
    client = cattle.from_env(url=BASE_URL + 'schemas', headers=headers)
    original_catalogs = client.list_catalog()
    assert len(original_catalogs) > 0
    original_templates = client.list_template()
    assert len(original_templates) > 0

    response = requests.delete(BASE_URL + 'catalogs/' + name,
                               headers=headers)
    assert response.status_code == 204

    # Exactly one catalog disappears; its templates go with it.
    templates = client.list_template()
    catalogs = client.list_catalog()
    assert len(catalogs) == len(original_catalogs) - 1
    assert len(templates) < len(original_templates)
@pytest.fixture
def client():
    """Wait for the catalog service to report at least one catalog, then
    return an API client bound to the default environment.

    BUG FIX: the original fetched list_catalog() once and waited on the
    length of that stale list, so the wait condition could never change.
    The lambda now re-queries the service on every poll.
    """
    url = 'http://localhost:8088/v1-catalog/schemas'
    wait_for(
        lambda: len(cattle.from_env(url=url,
                                    headers=DEFAULT_HEADERS).list_catalog()) > 0
    )
    return cattle.from_env(url=url, headers=DEFAULT_HEADERS)
def test_catalog_list(client):
    # The test rig is seeded with exactly two catalogs: 'orig' (upstream
    # git URL) and 'updated' (local copy under /tmp).
    catalogs = client.list_catalog()
    assert len(catalogs) == 2
    for catalog in catalogs:
        if catalog.name == 'orig':
            assert catalog.url == 'https://github.com/rancher/test-catalog'
        elif catalog.name == 'updated':
            assert catalog.url == '/tmp/test-catalog'
        else:
            # Any other catalog name means the fixture data is wrong.
            assert False


def test_get_catalogs(client):
    # GET /catalogs returns a collection; check the first entry's fields
    # and that its self link carries the projectId query parameter.
    url = 'http://localhost:8088/v1-catalog/catalogs'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()['data'][0]
    assert resp['name'] == 'orig'
    assert resp['url'] == 'https://github.com/rancher/test-catalog'
    assert resp['links']['self'] == 'http://localhost:8088/' + \
        'v1-catalog/catalogs/orig?projectId=' + DEFAULT_ENV


def test_get_catalog(client):
    # GET /catalogs/<name> returns the single catalog resource with the
    # same fields and self link as the collection entry above.
    url = 'http://localhost:8088/v1-catalog/catalogs/orig'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['name'] == 'orig'
    assert resp['url'] == 'https://github.com/rancher/test-catalog'
    assert resp['links']['self'] == 'http://localhost:8088/' + \
        'v1-catalog/catalogs/orig?projectId=' + DEFAULT_ENV
def test_get_catalog_404(client):
    # Unknown catalog names must yield a 404.
    url = 'http://localhost:8088/v1-catalog/catalogs/not-real'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 404


def test_catalog_commit(client):
    # 'orig' is expected to sit at this exact commit, while 'updated'
    # (the local copy) must be on a different commit.
    latest_commit = '4ec17d4c057be16e01fecb599af16b2b9dda9065'
    url = 'http://localhost:8088/v1-catalog/catalogs/orig'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['commit'] == latest_commit
    url = 'http://localhost:8088/v1-catalog/catalogs/updated'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['commit'] != latest_commit


def test_create_and_delete_catalog(client):
    # Round-trip: create a catalog, then remove it again.
    url = 'https://github.com/rancher/community-catalog'
    create_catalog('created', url)
    delete_catalog('created')


def test_catalog_branch(client):
    # Catalogs may track a non-default git branch.
    url = 'https://github.com/rancher/test-catalog'
    create_catalog('branch', url, "test-branch")
    delete_catalog('branch')


def test_catalog_duplicate_env_name(client):
    # Re-using a name inside the same environment is rejected (422).
    url = 'https://github.com/rancher/test-catalog'
    create_catalog('test', url)
    create_duplicate_catalog('test', url)
    delete_catalog('test')


def test_catalog_duplicate_global_name(client):
    # orig is the name of a global catalog that already exists
    url = 'https://github.com/rancher/test-catalog'
    create_duplicate_catalog('orig', url)


def test_catalog_edit(client):
    # PUT on the catalog's self link may change both its url and name;
    # verify via a follow-up GET of the returned self link.
    url = 'https://github.com/rancher/community-catalog'
    create_resp = create_catalog('edit', url)
    url = 'https://github.com/rancher/rancher-catalog'
    different_name = 'different_name'
    data = {
        'url': url,
        'name': different_name,
    }
    api_url = create_resp['links']['self']
    response = requests.put(api_url, data=json.dumps(data),
                            headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    response = requests.get(resp['links']['self'], headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['url'] == url
    assert resp['name'] == different_name
    delete_catalog(different_name)
def test_catalog_different_environment(client):
    # Catalogs created in environment 'e2' must not leak into the default
    # environment's catalog/template listings.
    original_catalogs = client.list_catalog()
    assert len(original_catalogs) > 0
    original_templates = client.list_template()
    assert len(original_templates) > 0
    url = 'https://github.com/rancher/community-catalog'
    create_catalog('env', url, headers=headers('e2'))
    templates = client.list_template()
    catalogs = client.list_catalog()
    # Counts as seen from the default environment are unchanged.
    assert len(catalogs) == len(original_catalogs)
    assert len(templates) == len(original_templates)
    delete_catalog('env', headers=headers('e2'))


def test_template_list(client):
    templates = client.list_template()
    assert len(templates) > 0


def test_get_template(client):
    # Template metadata for the 'orig:k8s' template.
    url = 'http://localhost:8088/v1-catalog/templates/orig:k8s'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['catalogId'] == 'orig'
    assert resp['folderName'] == 'k8s'
    assert resp['defaultVersion'] == 'v1.3.0-rancher4'
    assert len(resp['categories']) == 1
    assert resp['categories'][0] == 'System'


def test_get_template_template_version(client):
    # Same checks as test_get_template for the '-template-version' layout.
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:k8s-template-version'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['catalogId'] == 'orig'
    assert resp['folderName'] == 'k8s-template-version'
    assert resp['defaultVersion'] == 'v1.3.0-rancher4'
    assert len(resp['categories']) == 1
    assert resp['categories'][0] == 'System'
def test_get_template_with_version_folders(client):
    """A version-folder template exposes one version link per folder."""
    url = 'http://localhost:8088/v1-catalog/templates/orig:version-folders'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['catalogId'] == 'orig'
    assert resp['folderName'] == 'version-folders'
    versionLinks = resp['versionLinks']
    assert len(versionLinks) == 3
    assert 'v0.0.1' in versionLinks
    assert 'v0.0.1-rancher1.2' in versionLinks
    assert 'v0.0.3' in versionLinks
    for version in ('v0.0.1', 'v0.0.1-rancher1.2', 'v0.0.3'):
        version_id = 'orig:version-folders:' + version
        # PY3 FIX: dict.values() is a non-indexable view in Python 3; the
        # original chained versionLinks.values()[0/1/2] checks. Membership
        # over all three values is equivalent (len == 3 asserted above).
        assert any(version_id in link for link in versionLinks.values())
def test_get_template_404(client):
    url = 'http://localhost:8088/v1-catalog/templates/orig:not-real'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 404


def test_template_category(client):
    url = 'http://localhost:8088/v1-catalog/templates/orig:nfs-server'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['categories']) == 1
    assert resp['categories'][0] == 'Test'


def test_template_categories(client):
    # A template may declare more than one category.
    url = 'http://localhost:8088/v1-catalog/templates/orig:categories'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['categories']) == 2
    assert resp['categories'][0] == 'category1'
    assert resp['categories'][1] == 'category2'


def test_preserve_category_case(client):
    # Category names keep their original capitalization.
    url = BASE_URL + 'templates/orig:upper-case-categories'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['categories']) == 3
    assert resp['categories'][0] == 'CATEGORY1'
    assert resp['categories'][1] == 'CATEGORY2'
    assert resp['categories'][2] == 'CATEGORY3'


def test_category_filter(client):
    # ?category= filters templates; matching is case-insensitive (the
    # assertion lower-cases both sides).
    base_url = 'http://localhost:8088/v1-catalog/templates?category='
    for category in ('category1', 'category2', 'category3', 'System'):
        response = requests.get(base_url + category, headers=DEFAULT_HEADERS)
        assert response.status_code == 200
        resp = response.json()
        assert resp['data'] is not None
        for template in resp['data']:
            categories = [c.lower() for c in template['categories']]
            assert category.lower() in categories


def test_category_ne_filter(client):
    # ?category_ne= excludes templates carrying the given category.
    base_url = 'http://localhost:8088/v1-catalog/templates?category_ne='
    for category in ('category1', 'category2', 'System'):
        response = requests.get(base_url + category, headers=DEFAULT_HEADERS)
        assert response.status_code == 200
        resp = response.json()
        assert resp['data'] is not None
        for template in resp['data']:
            categories = template['categories']
            if categories:
                assert category not in template['categories']


def test_template_without_categories(client):
    # A template with no categories must survive any exclusion filter.
    # NOTE(review): the query parameter is 'catalog_ne' although the loop
    # iterates category names -- possibly meant to be 'category_ne';
    # confirm against the API before changing.
    base_url = 'http://localhost:8088/v1-catalog/templates'
    for category in ('category1', 'category2', 'System'):
        url = base_url + '?catalog_ne=' + category
        response = requests.get(url, headers=DEFAULT_HEADERS)
        assert response.status_code == 200
        resp = response.json()
        templates = resp['data']
        no_categories_template_found = False
        for template in templates:
            if template['folderName'] == 'no-categories':
                no_categories_template_found = True
                break
        assert no_categories_template_found
def test_machine_template(client):
    # Machine templates use '*' in their id and report
    # templateBase == 'machine'; the version resource lists its files.
    url = 'http://localhost:8088/v1-catalog/templates/orig:machine*vultr'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['templateBase'] == 'machine'
    url = 'http://localhost:8088/v1-catalog/templates/orig:machine*vultr:0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['files']) == 2
    assert 'rancher-compose.yml' in resp['files']
    assert 'url' in resp['files']


def test_template_labels(client):
    url = 'http://localhost:8088/v1-catalog/templates/orig:labels'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['labels'] is not None
    assert resp['labels']['key1'] == 'value1'
    assert resp['labels']['key2'] == 'value2'


def test_template_version_links(client):
    # Without a rancherVersion filter all 14 versions are linked; with
    # ?rancherVersion=v1.0.1 only 9 remain.
    url = 'http://localhost:8088/v1-catalog/templates/orig:many-versions'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['versionLinks']) == 14
    url = 'http://localhost:8088/v1-catalog/templates/orig:many-versions' + \
        '?rancherVersion=v1.0.1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert len(resp['versionLinks']) == 9


def test_rancher_version_filter(client):
    # Unfiltered listings include both boundary templates...
    templates = client.list_template()
    assert len(templates) > 0
    min_rancher_template_found = False
    max_rancher_template_found = False
    for template in templates:
        if template.folderName == 'min-rancher-version':
            min_rancher_template_found = True
        if template.folderName == 'max-rancher-version':
            max_rancher_template_found = True
    assert min_rancher_template_found
    assert max_rancher_template_found
    # ...but rancherVersion=v1.2.0 hides the min-constrained template...
    url = 'http://localhost:8088/v1-catalog/templates?rancherVersion=v1.2.0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['data'] is not None
    for template in resp['data']:
        assert template['folderName'] != 'min-rancher-version'
    # ...and rancherVersion=v1.5.0 hides the max-constrained one.
    url = 'http://localhost:8088/v1-catalog/templates?rancherVersion=v1.5.0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['data'] is not None
    for template in resp['data']:
        assert template['folderName'] != 'max-rancher-version'
def test_upgrade_links(client):
    # upgradeVersionLinks for a given revision; applying a rancherVersion
    # filter reduces the candidate set (10 vs 7 below).
    url = 'http://localhost:8088/v1-catalog/templates/' + \
        'orig:test-upgrade-links:1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    upgradeLinks = resp['upgradeVersionLinks']
    assert upgradeLinks is not None
    assert len(upgradeLinks) == 10
    url = 'http://localhost:8088/v1-catalog/templates/orig:many-versions:2' + \
        '?rancherVersion=v1.0.1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    upgradeLinks = resp['upgradeVersionLinks']
    assert upgradeLinks is not None
    assert len(upgradeLinks) == 7


def test_template_icon(client):
    # ?image returns the raw icon bytes (1139 bytes for nfs-server);
    # note the request is sent with an empty project header and the
    # projectId supplied in the query string instead.
    url = 'http://localhost:8088/v1-catalog/templates/orig:nfs-server' + \
        '?image&projectId=%s' % (DEFAULT_ENV)
    response = requests.get(url, headers=headers(''))
    assert response.status_code == 200
    assert len(response.content) == 1139


def test_get_template_version_by_revision(client):
    # Versions are addressable by numeric revision index.
    url = 'http://localhost:8088/v1-catalog/templates/orig:k8s:0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v0.1.0-rancher1'
    url = 'http://localhost:8088/v1-catalog/templates/orig:k8s:1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v1.2.4-rancher6'


def test_get_template_version_by_revision_template_version(client):
    # Same revision addressing for the '-template-version' layout.
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:k8s-template-version:0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v0.1.0-rancher1'
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:k8s-template-version:1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v1.2.4-rancher6'


def test_get_template_version_by_version(client):
    # Versions are also addressable by version string; a version that
    # does not exist (v0.0.2) yields 404.
    url = BASE_URL+'templates/orig:version-folders:v0.0.1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v0.0.1'
    url = BASE_URL+'templates/orig:version-folders:v0.0.1-rancher1.2'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v0.0.1-rancher1.2'
    url = BASE_URL+'templates/orig:version-folders:v0.0.3'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == 'v0.0.3'
    url = BASE_URL+'templates/orig:version-folders:v0.0.2'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 404


def test_get_template_version_404(client):
    url = 'http://localhost:8088/v1-catalog/templates/orig:k8s:1000'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 404


def test_get_template_version_404_template_version(client):
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:k8s-template-version:1000'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 404


def test_get_template_version_labels(client):
    # Labels can be attached at the version level, not just the template.
    url = 'http://localhost:8088/v1-catalog/templates/orig:version-labels:0'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['labels'] is not None
    assert resp['labels']['key1'] == 'value1'
    assert resp['labels']['key2'] == 'value2'
def test_template_version_questions(client):
    """Every supported question type is parsed with its metadata intact.

    Deduplicates the original's 55 copy-pasted assertions into a
    data-driven loop performing the identical per-question checks.
    """
    url = 'http://localhost:8088/v1-catalog/templates/' + \
        'orig:all-question-types:1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    questions = resp['questions']
    assert questions is not None
    # (variable, label, default, type) per question, in declaration order.
    expected = [
        ('TEST_STRING', 'String', 'hello', 'string'),
        ('TEST_MULTILINE', 'Multi-Line', 'Hello\nWorld\n', 'multiline'),
        ('TEST_PASSWORD', 'Password', "not-so-secret stuff", 'password'),
        ('TEST_ENUM', 'Enum', 'monkey', 'enum'),
        ('TEST_DATE', 'Date', '2015-07-25T19:55:00Z', 'date'),
        ('TEST_INT', 'Integer', '42', 'int'),
        ('TEST_FLOAT', 'Float', '4.2', 'float'),
        ('TEST_BOOLEAN', 'Boolean', 'true', 'boolean'),
        ('TEST_SERVICE', 'Service', 'kopf', 'service'),
        ('TEST_CERTIFICATE', 'Certificate', 'rancher.rocks', 'certificate'),
        ('TEST_UNKNOWN', 'Unknown', 'wha?', 'unknown'),
    ]
    assert len(questions) == len(expected)  # 11 question types
    for question, (variable, label, default, qtype) in zip(questions,
                                                           expected):
        assert question['variable'] == variable
        assert question['label'] == label
        assert not question['required']
        assert question['default'] == default
        assert question['type'] == qtype
    # The enum question additionally carries its option list.
    assert questions[3]['options'] == ['purple', 'monkey', 'dishwasher']
def test_template_version_questions_template_version(client):
    """Question parsing for the '-template-version' layout.

    Same checks as test_template_version_questions, deduplicated into a
    data-driven loop performing the identical per-question assertions.
    """
    url = 'http://localhost:8088/v1-catalog/templates/' + \
        'orig:all-question-types-template-version:1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    questions = resp['questions']
    assert questions is not None
    # (variable, label, default, type) per question, in declaration order.
    expected = [
        ('TEST_STRING', 'String', 'hello', 'string'),
        ('TEST_MULTILINE', 'Multi-Line', 'Hello\nWorld\n', 'multiline'),
        ('TEST_PASSWORD', 'Password', "not-so-secret stuff", 'password'),
        ('TEST_ENUM', 'Enum', 'monkey', 'enum'),
        ('TEST_DATE', 'Date', '2015-07-25T19:55:00Z', 'date'),
        ('TEST_INT', 'Integer', '42', 'int'),
        ('TEST_FLOAT', 'Float', '4.2', 'float'),
        ('TEST_BOOLEAN', 'Boolean', 'true', 'boolean'),
        ('TEST_SERVICE', 'Service', 'kopf', 'service'),
        ('TEST_CERTIFICATE', 'Certificate', 'rancher.rocks', 'certificate'),
        ('TEST_UNKNOWN', 'Unknown', 'wha?', 'unknown'),
    ]
    assert len(questions) == len(expected)  # 11 question types
    for question, (variable, label, default, qtype) in zip(questions,
                                                           expected):
        assert question['variable'] == variable
        assert question['label'] == label
        assert not question['required']
        assert question['default'] == default
        assert question['type'] == qtype
    # The enum question additionally carries its option list.
    assert questions[3]['options'] == ['purple', 'monkey', 'dishwasher']
def test_refresh(client):
    # After a refresh, the 'updated' catalog's many-versions template
    # serves revision 14 as version 1.0.14.
    url = 'http://localhost:8088/v1-catalog/templates/updated:many-versions:14'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['version'] == '1.0.14'


def test_refresh_no_changes(client):
    # A refresh with no upstream changes must not alter the catalog or
    # template counts.
    original_catalogs = client.list_catalog()
    assert len(original_catalogs) > 0
    original_templates = client.list_template()
    assert len(original_templates) > 0
    url = 'http://localhost:8088/v1-catalog/templates?action=refresh'
    response = requests.post(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 204
    catalogs = client.list_catalog()
    templates = client.list_template()
    assert len(catalogs) == len(original_catalogs)
    assert len(templates) == len(original_templates)


def test_v2_syntax(client):
    # All four revisions of the v2-syntax template must be served.
    for revision in [0, 1, 2, 3]:
        url = 'http://localhost:8088/v1-catalog/templates/orig:v2:' + \
            str(revision)
        response = requests.get(url, headers=DEFAULT_HEADERS)
        assert response.status_code == 200


def test_alternative_config_fields_1(client):
    # Alternative config-field spellings resolve to the same metadata.
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:alternative-config-fields-1'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['defaultVersion'] == '3.0.0'
    assert resp['links']['project'] == 'www.test.com'


def test_alternative_config_fields_2(client):
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:alternative-config-fields-2'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['defaultVersion'] == '3.0.0'
    assert resp['links']['project'] == 'www.test.com'


def test_alternative_config_fields_3(client):
    url = 'http://localhost:8088/v1-catalog/templates' + \
        '/orig:alternative-config-fields-3'
    response = requests.get(url, headers=DEFAULT_HEADERS)
    assert response.status_code == 200
    resp = response.json()
    assert resp['defaultVersion'] == '3.0.0'
| 35.134289
| 79
| 0.676043
| 3,303
| 26,948
| 5.392371
| 0.066001
| 0.07748
| 0.047723
| 0.053338
| 0.859413
| 0.840267
| 0.829319
| 0.812082
| 0.794509
| 0.759531
| 0
| 0.036219
| 0.178306
| 26,948
| 766
| 80
| 35.180157
| 0.768143
| 0.002078
| 0
| 0.635294
| 0
| 0
| 0.241837
| 0.025065
| 0
| 0
| 0
| 0
| 0.453782
| 1
| 0.078992
| false
| 0.010084
| 0.008403
| 0.001681
| 0.092437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b07290d6c051a24df18267c0fd4bdaee1b0c2e2
| 16,109
|
py
|
Python
|
tests/benchmarks/pybench/Lookups.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmarks/pybench/Lookups.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmarks/pybench/Lookups.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | 1
|
2018-12-16T23:51:18.000Z
|
2018-12-16T23:51:18.000Z
|
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pybench import Test
class SpecialClassAttribute(Test):
    """Benchmark setting and reading class attributes whose names begin with
    a double underscore.

    Because the ``c.__a`` references appear inside a class body, they are
    subject to Python's private-name mangling, which is the lookup path this
    test exercises.  NOTE: ``xrange`` makes this Python 2 benchmark code.
    """

    version = 2.0
    # 5 unrolled groups per round, each doing 12 attribute sets + 12 gets.
    operations = 5*(12 + 12)
    rounds = 100000

    def test(self):
        # Timed path: the per-statement unrolling is deliberate so that the
        # attribute operations, not loop overhead, dominate the measurement.
        class c:
            pass
        for i in xrange(self.rounds):
            # group 1: 12 sets, 12 gets
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            # group 2
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            # group 3
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            # group 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            # group 5
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c

    def calibrate(self):
        # Same setup and loop with an empty body: measures pure loop
        # overhead so pybench can subtract it from test()'s timing.
        class c:
            pass
        for i in xrange(self.rounds):
            pass
class NormalClassAttribute(Test):
    """Benchmark setting and reading ordinary (non-mangled) class attributes.

    Counterpart to SpecialClassAttribute, using plain names ``a``/``b``/``c``
    so no private-name mangling is involved.  NOTE: ``xrange`` makes this
    Python 2 benchmark code.
    """

    version = 2.0
    # 5 unrolled groups per round, each doing 12 attribute sets + 12 gets.
    operations = 5*(12 + 12)
    rounds = 100000

    def test(self):
        # Timed path: deliberately unrolled so attribute access dominates.
        class c:
            pass
        for i in xrange(self.rounds):
            # group 1: 12 sets, 12 gets
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            # group 2
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            # group 3
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            # group 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            # group 5
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c

    def calibrate(self):
        # Empty-body loop: measures loop overhead for subtraction.
        class c:
            pass
        for i in xrange(self.rounds):
            pass
class SpecialInstanceAttribute(Test):
    """Benchmark setting and reading dunder-style instance attributes.

    Uses ``__a__``-style names (leading AND trailing double underscores),
    which are exempt from private-name mangling, on a single instance ``o``.
    NOTE: ``xrange`` makes this Python 2 benchmark code.
    """

    version = 2.0
    # 5 unrolled groups per round, each doing 12 attribute sets + 12 gets.
    operations = 5*(12 + 12)
    rounds = 100000

    def test(self):
        # Timed path: deliberately unrolled so attribute access dominates.
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            # group 1: 12 sets, 12 gets
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            # group 2
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            # group 3
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            # group 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            # group 5
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__

    def calibrate(self):
        # Same setup with an empty loop body: measures loop overhead only.
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            pass
class NormalInstanceAttribute(Test):
    """Benchmark setting and reading ordinary instance attributes.

    Counterpart to SpecialInstanceAttribute using plain names ``a``/``b``/
    ``c`` on a single instance ``o``.  NOTE: ``xrange`` makes this Python 2
    benchmark code.
    """

    version = 2.0
    # 5 unrolled groups per round, each doing 12 attribute sets + 12 gets.
    operations = 5*(12 + 12)
    rounds = 100000

    def test(self):
        # Timed path: deliberately unrolled so attribute access dominates.
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            # group 1: 12 sets, 12 gets
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            # group 2
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            # group 3
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            # group 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            # group 5
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c

    def calibrate(self):
        # Same setup with an empty loop body: measures loop overhead only.
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            pass
class BuiltinMethodLookup(Test):
    """Benchmark looking up bound methods on builtin list and dict objects.

    Each statement only fetches the method attribute (creating a bound
    method object); nothing is called.  NOTE: ``d.has_key`` and ``xrange``
    make this Python 2 benchmark code.
    """

    version = 2.0
    # 5 unrolled groups per round: 3 list methods x 5 lookups each plus
    # 3 dict methods x 5 lookups each.
    operations = 5*(3*5 + 3*5)
    rounds = 70000

    def test(self):
        # Timed path: deliberately unrolled so the attribute lookup cost
        # dominates over loop overhead.
        l = []
        d = {}
        for i in xrange(self.rounds):
            # group 1: 5x append/insert/sort, 5x has_key/items/get
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            # group 2
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            # group 3
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            # group 4
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            # group 5
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get

    def calibrate(self):
        # Same setup with an empty loop body: measures loop overhead only.
        l = []
        d = {}
        for i in xrange(self.rounds):
            pass
| 16.693264
| 78
| 0.282078
| 2,096
| 16,109
| 1.8125
| 0.061069
| 0.063175
| 0.031587
| 0.042116
| 0.787576
| 0.787576
| 0.781258
| 0.780205
| 0.780205
| 0.765465
| 0
| 0.05317
| 0.635732
| 16,109
| 964
| 79
| 16.710581
| 0.59424
| 0.05289
| 0
| 0.988571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0.018571
| 0.001429
| 0
| 0.055714
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6b189c555d37db387732df2a25468a22d726bed7
| 177
|
py
|
Python
|
yotta/__init__.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 176
|
2015-01-02T07:31:59.000Z
|
2022-03-21T12:40:02.000Z
|
yotta/__init__.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 549
|
2015-01-05T16:19:54.000Z
|
2021-01-15T13:46:42.000Z
|
yotta/__init__.py
|
microbit-foundation/yotta
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
[
"Apache-2.0"
] | 84
|
2015-01-10T21:01:00.000Z
|
2022-03-24T16:04:42.000Z
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
from yotta.main import main
from yotta.main import __version__
| 19.666667
| 48
| 0.779661
| 27
| 177
| 4.962963
| 0.740741
| 0.134328
| 0.19403
| 0.283582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0.169492
| 177
| 8
| 49
| 22.125
| 0.870748
| 0.581921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6b466e1a894a42f5dc032ab8a5ea6b9381099607
| 19,542
|
py
|
Python
|
ext/ANTsPyNet/antspynet/architectures/create_deep_back_projection_network_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 2
|
2021-11-16T10:00:33.000Z
|
2021-12-13T02:57:40.000Z
|
ext/ANTsPyNet/antspynet/architectures/create_deep_back_projection_network_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | null | null | null |
ext/ANTsPyNet/antspynet/architectures/create_deep_back_projection_network_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 1
|
2021-12-13T02:57:27.000Z
|
2021-12-13T02:57:27.000Z
|
from keras.models import Model
from keras.layers import (Input, Add, Subtract,
PReLU, Concatenate,
Conv2D, Conv2DTranspose,
Conv3D, Conv3DTranspose)
def create_deep_back_projection_network_model_2d(input_image_size,
                                                 number_of_outputs=1,
                                                 number_of_base_filters=64,
                                                 number_of_feature_filters=256,
                                                 number_of_back_projection_stages=7,
                                                 convolution_kernel_size=(12, 12),
                                                 strides=(8, 8),
                                                 last_convolution=(3, 3),
                                                 number_of_loss_functions=1
                                                ):
    """
    2-D implementation of the deep back-projection network (DBPN).

    Creates a keras model of the deep back-projection network for image
    super resolution.  More information is provided at the authors' website:

        https://www.toyota-ti.ac.jp/Lab/Denshi/iim/members/muhammad.haris/projects/DBPN.html

    with the paper available here:

        https://arxiv.org/abs/1803.02735

    This particular implementation was influenced by the following keras
    (python) implementation:

        https://github.com/rajatkb/DBPN-Keras

    with help from the original author's Caffe and Pytorch implementations:

        https://github.com/alterzero/DBPN-caffe
        https://github.com/alterzero/DBPN-Pytorch

    Arguments
    ---------
    input_image_size : tuple of length 3
        Used for specifying the input tensor shape.  The shape (or dimension)
        of that tensor is the image dimensions followed by the number of
        channels (e.g., red, green, and blue).

    number_of_outputs : integer
        Number of outputs (e.g., 3 for RGB images).

    number_of_base_filters : integer
        Number of base filters.

    number_of_feature_filters : integer
        Number of feature filters.

    number_of_back_projection_stages : integer
        Number of up-down-projection stages.  This number includes the final
        up block.

    convolution_kernel_size : tuple of length 2
        Kernel size for certain convolutional layers.  This and the strides
        are dependent on the scale factor discussed in the original paper.
        Factors used in the original implementation:
        2x --> convolution_kernel_size=(6, 6),
        4x --> convolution_kernel_size=(8, 8),
        8x --> convolution_kernel_size=(12, 12).  We default to 8x parameters.

    strides : tuple of length 2
        Strides for certain convolutional layers.  Factors used in the
        original implementation:
        2x --> strides=(2, 2), 4x --> strides=(4, 4), 8x --> strides=(8, 8).
        We default to 8x parameters.

    last_convolution : tuple of length 2
        The kernel size for the last convolutional layer.

    number_of_loss_functions : integer
        The number of data targets, e.g. 2 for 2 targets.  When > 1 the same
        output tensor is repeated once per target.

    Returns
    -------
    Keras model
        A 2-D Keras model defining the network.

    Example
    -------
    >>> model = create_deep_back_projection_network_model_2d((128, 128, 1))
    >>> model.summary()
    """

    def up_block_2d(L, number_of_filters=64, kernel_size=(12, 12), strides=(8, 8),
                    include_dense_convolution_layer=True):
        # Up-projection unit: scale up (H0), back down (L0), correct with the
        # upsampled low-resolution residual (H1), and output H0 + H1.
        if include_dense_convolution_layer:
            # 1x1 bottleneck used by the dense-connection variant to merge
            # the concatenated feature maps back to number_of_filters.
            L = Conv2D(filters=number_of_filters,
                       use_bias=True,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='same')(L)
            L = PReLU(alpha_initializer='zero',
                      shared_axes=[1, 2])(L)

        # Scale up
        H0 = Conv2DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(L)
        H0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(H0)

        # Scale back down
        L0 = Conv2D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(H0)
        L0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(L0)

        # Residual in the low-resolution space
        E = Subtract()([L0, L])

        # Scale the residual up
        H1 = Conv2DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(E)
        H1 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(H1)

        # Output feature map
        return Add()([H0, H1])

    def down_block_2d(H, number_of_filters=64, kernel_size=(12, 12), strides=(8, 8),
                      include_dense_convolution_layer=True):
        # Down-projection unit: mirror image of up_block_2d.
        if include_dense_convolution_layer:
            # 1x1 bottleneck for the dense-connection variant.
            H = Conv2D(filters=number_of_filters,
                       use_bias=True,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='same')(H)
            H = PReLU(alpha_initializer='zero',
                      shared_axes=[1, 2])(H)

        # Scale down
        L0 = Conv2D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(H)
        L0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(L0)

        # Scale back up
        H0 = Conv2DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(L0)
        H0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(H0)

        # Residual in the high-resolution space
        E = Subtract()([H0, H])

        # Scale the residual down
        L1 = Conv2D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(E)
        L1 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2])(L1)

        # Output feature map
        return Add()([L0, L1])

    inputs = Input(shape=input_image_size)

    # Initial feature extraction
    model = Conv2D(filters=number_of_feature_filters,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer='glorot_uniform')(inputs)
    model = PReLU(alpha_initializer='zero',
                  shared_axes=[1, 2])(model)

    # Feature smashing (1x1 channel reduction to the base filter count)
    model = Conv2D(filters=number_of_base_filters,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer='glorot_uniform')(model)
    model = PReLU(alpha_initializer='zero',
                  shared_axes=[1, 2])(model)

    # Back projection
    up_projection_blocks = []
    down_projection_blocks = []

    model = up_block_2d(model, number_of_filters=number_of_base_filters,
                        kernel_size=convolution_kernel_size, strides=strides)
    up_projection_blocks.append(model)

    for i in range(number_of_back_projection_stages):
        if i == 0:
            # First stage uses plain projection blocks (no dense 1x1 merge).
            model = down_block_2d(model, number_of_filters=number_of_base_filters,
                                  kernel_size=convolution_kernel_size, strides=strides)
            down_projection_blocks.append(model)

            model = up_block_2d(model, number_of_filters=number_of_base_filters,
                                kernel_size=convolution_kernel_size, strides=strides)
            up_projection_blocks.append(model)
            model = Concatenate()(up_projection_blocks)
        else:
            # Later stages densely connect all previous projection outputs.
            model = down_block_2d(model, number_of_filters=number_of_base_filters,
                                  kernel_size=convolution_kernel_size, strides=strides,
                                  include_dense_convolution_layer=True)
            down_projection_blocks.append(model)
            model = Concatenate()(down_projection_blocks)

            model = up_block_2d(model, number_of_filters=number_of_base_filters,
                                kernel_size=convolution_kernel_size, strides=strides,
                                include_dense_convolution_layer=True)
            up_projection_blocks.append(model)
            model = Concatenate()(up_projection_blocks)

    # Final convolution layer
    outputs = Conv2D(filters=number_of_outputs,
                     kernel_size=last_convolution,
                     strides=(1, 1),
                     padding='same',
                     kernel_initializer='glorot_uniform')(model)

    if number_of_loss_functions == 1:
        model_outputs = outputs
    else:
        # Repeat the single output tensor, once per loss target.
        model_outputs = [outputs] * number_of_loss_functions

    return Model(inputs=inputs, outputs=model_outputs)
def create_deep_back_projection_network_model_3d(input_image_size,
                                                 number_of_outputs=1,
                                                 number_of_base_filters=64,
                                                 number_of_feature_filters=256,
                                                 number_of_back_projection_stages=7,
                                                 convolution_kernel_size=(12, 12, 12),
                                                 strides=(8, 8, 8),
                                                 last_convolution=(3, 3, 3),
                                                 number_of_loss_functions=1
                                                ):
    """
    3-D implementation of the deep back-projection network (DBPN).

    Creates a keras model of the deep back-projection network for image
    super resolution.  More information is provided at the authors' website:

        https://www.toyota-ti.ac.jp/Lab/Denshi/iim/members/muhammad.haris/projects/DBPN.html

    with the paper available here:

        https://arxiv.org/abs/1803.02735

    This particular implementation was influenced by the following keras
    (python) implementation:

        https://github.com/rajatkb/DBPN-Keras

    with help from the original author's Caffe and Pytorch implementations:

        https://github.com/alterzero/DBPN-caffe
        https://github.com/alterzero/DBPN-Pytorch

    Arguments
    ---------
    input_image_size : tuple of length 4
        Used for specifying the input tensor shape.  The shape (or dimension)
        of that tensor is the image dimensions followed by the number of
        channels (e.g., red, green, and blue).

    number_of_outputs : integer
        Number of outputs (e.g., 3 for RGB images).

    number_of_base_filters : integer
        Number of base filters.

    number_of_feature_filters : integer
        Number of feature filters.

    number_of_back_projection_stages : integer
        Number of up-down-projection stages.  This number includes the final
        up block.

    convolution_kernel_size : tuple of length 3
        Kernel size for certain convolutional layers.  This and the strides
        are dependent on the scale factor discussed in the original paper.
        Factors used in the original implementation:
        2x --> convolution_kernel_size=(6, 6, 6),
        4x --> convolution_kernel_size=(8, 8, 8),
        8x --> convolution_kernel_size=(12, 12, 12).  We default to 8x
        parameters.

    strides : tuple of length 3
        Strides for certain convolutional layers.  Factors used in the
        original implementation:
        2x --> strides=(2, 2, 2), 4x --> strides=(4, 4, 4),
        8x --> strides=(8, 8, 8).  We default to 8x parameters.

    last_convolution : tuple of length 3
        The kernel size for the last convolutional layer.

    number_of_loss_functions : integer
        The number of data targets, e.g. 2 for 2 targets.  When > 1 the same
        output tensor is repeated once per target.

    Returns
    -------
    Keras model
        A 3-D Keras model defining the network.

    Example
    -------
    >>> model = create_deep_back_projection_network_model_3d((128, 128, 128, 1))
    >>> model.summary()
    """

    def up_block_3d(L, number_of_filters=64, kernel_size=(12, 12, 12), strides=(8, 8, 8),
                    include_dense_convolution_layer=True):
        # Up-projection unit: scale up (H0), back down (L0), correct with the
        # upsampled low-resolution residual (H1), and output H0 + H1.
        if include_dense_convolution_layer:
            # 1x1x1 bottleneck used by the dense-connection variant to merge
            # the concatenated feature maps back to number_of_filters.
            L = Conv3D(filters=number_of_filters,
                       use_bias=True,
                       kernel_size=(1, 1, 1),
                       strides=(1, 1, 1),
                       padding='same')(L)
            L = PReLU(alpha_initializer='zero',
                      shared_axes=[1, 2, 3])(L)

        # Scale up
        H0 = Conv3DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(L)
        H0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(H0)

        # Scale back down
        L0 = Conv3D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(H0)
        L0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(L0)

        # Residual in the low-resolution space
        E = Subtract()([L0, L])

        # Scale the residual up
        H1 = Conv3DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(E)
        H1 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(H1)

        # Output feature map
        return Add()([H0, H1])

    def down_block_3d(H, number_of_filters=64, kernel_size=(12, 12, 12), strides=(8, 8, 8),
                      include_dense_convolution_layer=True):
        # Down-projection unit: mirror image of up_block_3d.
        if include_dense_convolution_layer:
            # 1x1x1 bottleneck for the dense-connection variant.
            H = Conv3D(filters=number_of_filters,
                       use_bias=True,
                       kernel_size=(1, 1, 1),
                       strides=(1, 1, 1),
                       padding='same')(H)
            H = PReLU(alpha_initializer='zero',
                      shared_axes=[1, 2, 3])(H)

        # Scale down
        L0 = Conv3D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(H)
        L0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(L0)

        # Scale back up
        H0 = Conv3DTranspose(filters=number_of_filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             kernel_initializer='glorot_uniform',
                             padding='same')(L0)
        H0 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(H0)

        # Residual in the high-resolution space
        E = Subtract()([H0, H])

        # Scale the residual down
        L1 = Conv3D(filters=number_of_filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    kernel_initializer='glorot_uniform',
                    padding='same')(E)
        L1 = PReLU(alpha_initializer='zero',
                   shared_axes=[1, 2, 3])(L1)

        # Output feature map
        return Add()([L0, L1])

    inputs = Input(shape=input_image_size)

    # Initial feature extraction
    model = Conv3D(filters=number_of_feature_filters,
                   kernel_size=(3, 3, 3),
                   strides=(1, 1, 1),
                   padding='same',
                   kernel_initializer='glorot_uniform')(inputs)
    model = PReLU(alpha_initializer='zero',
                  shared_axes=[1, 2, 3])(model)

    # Feature smashing (1x1x1 channel reduction to the base filter count)
    model = Conv3D(filters=number_of_base_filters,
                   kernel_size=(1, 1, 1),
                   strides=(1, 1, 1),
                   padding='same',
                   kernel_initializer='glorot_uniform')(model)
    model = PReLU(alpha_initializer='zero',
                  shared_axes=[1, 2, 3])(model)

    # Back projection
    up_projection_blocks = []
    down_projection_blocks = []

    model = up_block_3d(model, number_of_filters=number_of_base_filters,
                        kernel_size=convolution_kernel_size, strides=strides)
    up_projection_blocks.append(model)

    for i in range(number_of_back_projection_stages):
        if i == 0:
            # First stage uses plain projection blocks (no dense 1x1 merge).
            model = down_block_3d(model, number_of_filters=number_of_base_filters,
                                  kernel_size=convolution_kernel_size, strides=strides)
            down_projection_blocks.append(model)

            model = up_block_3d(model, number_of_filters=number_of_base_filters,
                                kernel_size=convolution_kernel_size, strides=strides)
            up_projection_blocks.append(model)
            model = Concatenate()(up_projection_blocks)
        else:
            # Later stages densely connect all previous projection outputs.
            model = down_block_3d(model, number_of_filters=number_of_base_filters,
                                  kernel_size=convolution_kernel_size, strides=strides,
                                  include_dense_convolution_layer=True)
            down_projection_blocks.append(model)
            model = Concatenate()(down_projection_blocks)

            model = up_block_3d(model, number_of_filters=number_of_base_filters,
                                kernel_size=convolution_kernel_size, strides=strides,
                                include_dense_convolution_layer=True)
            up_projection_blocks.append(model)
            model = Concatenate()(up_projection_blocks)

    # Final convolution layer
    outputs = Conv3D(filters=number_of_outputs,
                     kernel_size=last_convolution,
                     strides=(1, 1, 1),
                     padding='same',
                     kernel_initializer='glorot_uniform')(model)

    if number_of_loss_functions == 1:
        model_outputs = outputs
    else:
        # Repeat the single output tensor, once per loss target.
        model_outputs = [outputs] * number_of_loss_functions

    return Model(inputs=inputs, outputs=model_outputs)
| 36.871698
| 96
| 0.565142
| 2,123
| 19,542
| 4.9496
| 0.092793
| 0.063951
| 0.051389
| 0.050247
| 0.982109
| 0.978588
| 0.971545
| 0.957937
| 0.942901
| 0.926437
| 0
| 0.029451
| 0.351909
| 19,542
| 529
| 97
| 36.941399
| 0.800237
| 0.267424
| 0
| 0.846442
| 0
| 0
| 0.030402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0
| 0.007491
| 0
| 0.029963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.