hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20ddea3bbd375681758756b26a3e266d63f0568f
| 81,693
|
py
|
Python
|
pyboto3/dataexchange.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/dataexchange.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/dataexchange.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
    """
    Check whether the named client operation supports pagination.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client method
        name. For example, if the method name is create_foo and you would
        normally invoke the operation as client.create_foo(**kwargs), then if
        create_foo can be paginated you can use
        client.get_paginator('create_foo').
    """
    # Documentation stub only; the real implementation is provided by botocore.
    pass
def cancel_job(JobId=None):
    """
    Cancel a job. Jobs can be cancelled only while they are in the WAITING
    state.

    See also: AWS API Documentation

    Exceptions

    :example: response = client.cancel_job(
        JobId='string'
    )

    :type JobId: string
    :param JobId: [REQUIRED] The unique identifier for a job.
    """
    # Documentation stub only; no local behavior.
    pass
def create_data_set(AssetType=None, Description=None, Name=None, Tags=None):
    """
    Create a data set.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.create_data_set(
            AssetType='S3_SNAPSHOT',
            Description='string',
            Name='string',
            Tags={'string': 'string'}
        )

    :type AssetType: string
    :param AssetType: [REQUIRED] The type of file your data is stored in.
        Currently, the supported asset type is S3_SNAPSHOT.
    :type Description: string
    :param Description: [REQUIRED] A description for the data set. This value
        can be up to 16,348 characters long.
    :type Name: string
    :param Name: [REQUIRED] The name of the data set.
    :type Tags: dict
    :param Tags: An optional label you can assign to a data set when you
        create it. Each tag consists of a key and an optional value, both of
        which you define. Tags can also be used for tag-based access control
        in IAM policies. Shape: {'string': 'string'}.

    :rtype: dict
    :return: A 201 response describing the new data set:
        {
            'Arn': 'string',                  # ARN for the data set
            'AssetType': 'S3_SNAPSHOT',       # type of file the data is stored in
            'CreatedAt': datetime(2015, 1, 1),# creation time, ISO 8601
            'Description': 'string',
            'Id': 'string',                   # unique identifier for the data set
            'Name': 'string',
            'Origin': 'OWNED'|'ENTITLED',     # OWNED by the account (providers)
                                              # or ENTITLED to it (subscribers)
            'OriginDetails': {
                'ProductId': 'string'         # present when origin is ENTITLED
            },
            'SourceId': 'string',             # owned data set ID corresponding to
                                              # an entitled data set being viewed
            'Tags': {'string': 'string'},
            'UpdatedAt': datetime(2015, 1, 1) # last-update time, ISO 8601
        }

    Exceptions:
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.ServiceLimitExceededException
        DataExchange.Client.exceptions.AccessDeniedException
    """
    # Documentation stub only; no local behavior.
    pass
def create_job(Details=None, Type=None):
    """
    Create an import or export job.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.create_job(
            Details={...},  # one of the four shapes described under :param Details:
            Type='IMPORT_ASSETS_FROM_S3'|'IMPORT_ASSET_FROM_SIGNED_URL'|'EXPORT_ASSETS_TO_S3'|'EXPORT_ASSET_TO_SIGNED_URL'
        )

    :type Details: dict
    :param Details: [REQUIRED] The details for the CreateJob request. Provide
        the key that matches *Type*:

        - ExportAssetToSignedUrl (dict): details for an export-to-signed-URL
          request.
            AssetId (string) -- [REQUIRED] The asset to export.
            DataSetId (string) -- [REQUIRED] The data set for this export job.
            RevisionId (string) -- [REQUIRED] The revision for this export.
        - ExportAssetsToS3 (dict): details for an export-to-Amazon-S3 request.
            AssetDestinations (list) -- [REQUIRED] One dict per asset:
                AssetId (string) -- [REQUIRED], Bucket (string) -- [REQUIRED],
                Key (string) -- S3 object name for the asset.
            DataSetId (string) -- [REQUIRED]
            Encryption (dict) -- KmsKeyArn (string, [REQUIRED]; required when
                Type is 'aws:kms') and Type (string, [REQUIRED];
                'aws:kms'|'AES256', the server-side encryption used).
            RevisionId (string) -- [REQUIRED]
        - ImportAssetFromSignedUrl (dict): details for an import-from-signed-URL
          request.
            AssetName (string) -- [REQUIRED] When importing from Amazon S3,
                the S3 object key is used as the asset name.
            DataSetId (string) -- [REQUIRED]
            Md5Hash (string) -- [REQUIRED] Base64-encoded MD5 hash of the
                asset, used to ensure file integrity.
            RevisionId (string) -- [REQUIRED]
        - ImportAssetsFromS3 (dict): details for an import-from-Amazon-S3
          request.
            AssetSources (list) -- [REQUIRED] One dict per source:
                Bucket (string) -- [REQUIRED], Key (string) -- [REQUIRED].
            DataSetId (string) -- [REQUIRED]
            RevisionId (string) -- [REQUIRED]
    :type Type: string
    :param Type: [REQUIRED] The type of job to be created.

    :rtype: dict
    :return: A 201 response describing the job:
        {
            'Arn': 'string',                   # ARN for the job
            'CreatedAt': datetime(2015, 1, 1), # creation time, ISO 8601
            'Details': {
                # Echoes the request details for the chosen job type. The two
                # signed-URL job types additionally include:
                #   'SignedUrl': 'string',
                #   'SignedUrlExpiresAt': datetime(2015, 1, 1)
                'ExportAssetToSignedUrl': {...},
                'ExportAssetsToS3': {...},
                'ImportAssetFromSignedUrl': {...},
                'ImportAssetsFromS3': {...}
            },
            'Errors': [
                {
                    'Code': 'ACCESS_DENIED_EXCEPTION'|'INTERNAL_SERVER_EXCEPTION'|'MALWARE_DETECTED'|'RESOURCE_NOT_FOUND_EXCEPTION'|'SERVICE_QUOTA_EXCEEDED_EXCEPTION'|'VALIDATION_EXCEPTION'|'MALWARE_SCAN_ENCRYPTED_FILE',
                    'Details': {
                        'ImportAssetFromSignedUrlJobErrorDetails': {
                            'AssetName': 'string'
                        },
                        'ImportAssetsFromS3JobErrorDetails': [
                            {'Bucket': 'string', 'Key': 'string'},
                        ]
                    },
                    'LimitName': 'Assets per revision'|'Asset size in GB',
                    'LimitValue': 123.0,       # value of the exceeded limit
                    'Message': 'string',       # message related to the error
                    'ResourceId': 'string',    # resource related to the error
                    'ResourceType': 'REVISION'|'ASSET'
                },
            ],
            'Id': 'string',                    # unique identifier for the job
            'State': 'WAITING'|'IN_PROGRESS'|'ERROR'|'COMPLETED'|'CANCELLED'|'TIMED_OUT',
            'Type': 'IMPORT_ASSETS_FROM_S3'|'IMPORT_ASSET_FROM_SIGNED_URL'|'EXPORT_ASSETS_TO_S3'|'EXPORT_ASSET_TO_SIGNED_URL',
            'UpdatedAt': datetime(2015, 1, 1)  # last-update time, ISO 8601
        }

    Exceptions:
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
    """
    # Documentation stub only; no local behavior.
    pass
def create_revision(Comment=None, DataSetId=None, Tags=None):
    """
    Create a revision for a data set.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.create_revision(
            Comment='string',
            DataSetId='string',
            Tags={'string': 'string'}
        )

    :type Comment: string
    :param Comment: An optional comment about the revision.
    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type Tags: dict
    :param Tags: An optional label you can assign to a revision when you
        create it. Each tag consists of a key and an optional value, both of
        which you define. Tags can also be used for tag-based access control
        in IAM policies. Shape: {'string': 'string'}.

    :rtype: dict
    :return: A 201 response describing the new revision:
        {
            'Arn': 'string',                  # ARN for the revision
            'Comment': 'string',              # optional comment
            'CreatedAt': datetime(2015, 1, 1),# creation time, ISO 8601
            'DataSetId': 'string',            # owning data set
            'Finalized': True|False,          # a revision must be finalized
                                              # before it can be published to a
                                              # product; finalized revisions are
                                              # read-only and can be published
                                              # via the console or the AWS
                                              # Marketplace Catalog API
                                              # (StartChangeSet), identified by
                                              # their ARN
            'Id': 'string',                   # unique identifier
            'SourceId': 'string',             # owned revision ID corresponding
                                              # to an entitled revision viewed
            'Tags': {'string': 'string'},
            'UpdatedAt': datetime(2015, 1, 1) # last-update time, ISO 8601
        }

    Exceptions:
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
    """
    # Documentation stub only; no local behavior.
    pass
def delete_asset(AssetId=None, DataSetId=None, RevisionId=None):
    """
    Delete an asset.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.delete_asset(
            AssetId='string',
            DataSetId='string',
            RevisionId='string'
        )

    :type AssetId: string
    :param AssetId: [REQUIRED] The unique identifier for an asset.
    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    Exceptions:
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ConflictException
    """
    # Documentation stub only; no local behavior.
    pass
def delete_data_set(DataSetId=None):
    """
    Delete a data set.

    See also: AWS API Documentation

    Exceptions

    :example: response = client.delete_data_set(
        DataSetId='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    """
    # Documentation stub only; no local behavior.
    pass
def delete_revision(DataSetId=None, RevisionId=None):
    """
    Delete a revision.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.delete_revision(
            DataSetId='string',
            RevisionId='string'
        )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    Exceptions:
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ConflictException
    """
    # Documentation stub only; no local behavior.
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL is valid for.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the HTTP method is whatever is used in the method's model.
    """
    # Documentation stub only; the real implementation is provided by botocore.
    pass
def get_asset(AssetId=None, DataSetId=None, RevisionId=None):
    """
    Return information about an asset.

    See also: AWS API Documentation

    Exceptions

    :example:
        response = client.get_asset(
            AssetId='string',
            DataSetId='string',
            RevisionId='string'
        )

    :type AssetId: string
    :param AssetId: [REQUIRED] The unique identifier for an asset.
    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    :rtype: dict
    :return: A 200 response describing the asset:
        {
            'Arn': 'string',                  # ARN for the asset
            'AssetDetails': {
                'S3SnapshotAsset': {
                    'Size': 123.0             # size of the S3 object
                }
            },
            'AssetType': 'S3_SNAPSHOT',       # type of file the data is stored in
            'CreatedAt': datetime(2015, 1, 1),# creation time, ISO 8601
            'DataSetId': 'string',            # owning data set
            'Id': 'string',                   # unique identifier for the asset
            'Name': 'string',                 # when importing from Amazon S3 the
                                              # S3 object key is the asset name;
                                              # when exporting, the asset name is
                                              # the default target S3 object key
            'RevisionId': 'string',           # owning revision
            'SourceId': 'string',             # owned asset ID corresponding to
                                              # an entitled asset being viewed
            'UpdatedAt': datetime(2015, 1, 1) # last-update time, ISO 8601
        }

    Exceptions:
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
    """
    # Documentation stub only; no local behavior.
    pass
def get_data_set(DataSetId=None):
    """
    Return information about a data set.

    See also: AWS API Documentation

    Exceptions

    :example: response = client.get_data_set(
        DataSetId='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.

    :rtype: dict
    :return: A 200 response describing the data set:
        {
            'Arn': 'string',                  # ARN for the data set
            'AssetType': 'S3_SNAPSHOT',       # type of file the data is stored in
            'CreatedAt': datetime(2015, 1, 1),# creation time, ISO 8601
            'Description': 'string',
            'Id': 'string',                   # unique identifier for the data set
            'Name': 'string',
            'Origin': 'OWNED'|'ENTITLED',     # OWNED by the account (providers)
                                              # or ENTITLED to it (subscribers)
            'OriginDetails': {
                'ProductId': 'string'         # present when origin is ENTITLED
            },
            'SourceId': 'string',             # owned data set ID corresponding to
                                              # an entitled data set being viewed
            'Tags': {'string': 'string'},
            'UpdatedAt': datetime(2015, 1, 1) # last-update time, ISO 8601
        }

    Exceptions:
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
    """
    # Documentation stub only; no local behavior.
    pass
def get_job(JobId=None):
    """
    Return information about a single AWS Data Exchange job.

    Jobs are asynchronous import or export operations used to create or copy
    assets. See also: AWS API Documentation.

    :example: response = client.get_job(
        JobId='string'
    )

    :type JobId: string
    :param JobId: [REQUIRED] The unique identifier for a job.

    :rtype: dict
    :return: A 200 response of the form::

        {
            'Arn': 'string',                    # The ARN for the job.
            'CreatedAt': datetime(2015, 1, 1),  # Creation time, ISO 8601 format.
            'Details': {                        # Details about the job.
                'ExportAssetToSignedUrl': {     # Export-to-signed-URL details.
                    'AssetId': 'string',
                    'DataSetId': 'string',
                    'RevisionId': 'string',
                    'SignedUrl': 'string',
                    'SignedUrlExpiresAt': datetime(2015, 1, 1)
                },
                'ExportAssetsToS3': {           # Export-to-Amazon-S3 details.
                    'AssetDestinations': [
                        {'AssetId': 'string', 'Bucket': 'string', 'Key': 'string'},
                    ],
                    'DataSetId': 'string',
                    'Encryption': {
                        # KmsKeyArn is the ARN of the AWS KMS key used to encrypt
                        # the S3 objects; required when Type is aws:kms.
                        'KmsKeyArn': 'string',
                        'Type': 'aws:kms'|'AES256'
                    },
                    'RevisionId': 'string'
                },
                'ImportAssetFromSignedUrl': {   # Import-from-signed-URL details.
                    'AssetName': 'string',
                    'DataSetId': 'string',
                    'Md5Hash': 'string',        # Base64-encoded MD5 hash of the file.
                    'RevisionId': 'string',
                    'SignedUrl': 'string',
                    'SignedUrlExpiresAt': datetime(2015, 1, 1)
                },
                'ImportAssetsFromS3': {         # Import-from-Amazon-S3 details.
                    'AssetSources': [
                        {'Bucket': 'string', 'Key': 'string'},
                    ],
                    'DataSetId': 'string',
                    'RevisionId': 'string'
                }
            },
            'Errors': [                         # Errors associated with the job.
                {
                    'Code': 'ACCESS_DENIED_EXCEPTION'|'INTERNAL_SERVER_EXCEPTION'|'MALWARE_DETECTED'|'RESOURCE_NOT_FOUND_EXCEPTION'|'SERVICE_QUOTA_EXCEEDED_EXCEPTION'|'VALIDATION_EXCEPTION'|'MALWARE_SCAN_ENCRYPTED_FILE',
                    'Details': {
                        'ImportAssetFromSignedUrlJobErrorDetails': {
                            # When importing from Amazon S3, the S3 object key is
                            # used as the asset name.
                            'AssetName': 'string'
                        },
                        'ImportAssetsFromS3JobErrorDetails': [
                            {'Bucket': 'string', 'Key': 'string'},
                        ]
                    },
                    'LimitName': 'Assets per revision'|'Asset size in GB',
                    'LimitValue': 123.0,        # The value of the exceeded limit.
                    'Message': 'string',
                    'ResourceId': 'string',
                    'ResourceType': 'REVISION'|'ASSET'
                },
            ],
            'Id': 'string',                     # The unique identifier for the job.
            'State': 'WAITING'|'IN_PROGRESS'|'ERROR'|'COMPLETED'|'CANCELLED'|'TIMED_OUT',
            'Type': 'IMPORT_ASSETS_FROM_S3'|'IMPORT_ASSET_FROM_SIGNED_URL'|'EXPORT_ASSETS_TO_S3'|'EXPORT_ASSET_TO_SIGNED_URL',
            'UpdatedAt': datetime(2015, 1, 1)   # Last-update time, ISO 8601 format.
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as the
        method name on the client. For example, if the method name is
        ``create_foo``, and you'd normally invoke the operation as
        ``client.create_foo(**kwargs)``, and the ``create_foo`` operation can
        be paginated, you can use the call
        ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_revision(DataSetId=None, RevisionId=None):
    """
    Return information about a revision.

    See also: AWS API Documentation.

    :example: response = client.get_revision(
        DataSetId='string',
        RevisionId='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    :rtype: dict
    :return: A 200 response of the form::

        {
            'Arn': 'string',                    # The ARN for the revision.
            'Comment': 'string',                # Optional comment about the revision.
            'CreatedAt': datetime(2015, 1, 1),  # Creation time, ISO 8601 format.
            'DataSetId': 'string',              # Data set this revision belongs to.
            # A revision must be finalized (made read-only) before it can be
            # published to a product, via the console or the AWS Marketplace
            # Catalog API StartChangeSet action (revisions identified by ARN).
            'Finalized': True|False,
            'Id': 'string',                     # The unique identifier for the revision.
            # SourceId is the ID of the owned revision corresponding to this
            # entitled revision; returned when an owner views the entitled copy.
            'SourceId': 'string',
            'Tags': {                           # The tags for the revision.
                'string': 'string'
            },
            'UpdatedAt': datetime(2015, 1, 1)   # Last-update time, ISO 8601 format.
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def get_waiter(waiter_name=None):
    """
    Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters section
        of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    pass
def list_data_set_revisions(DataSetId=None, MaxResults=None, NextToken=None):
    """
    List a data set's revisions, sorted by CreatedAt in descending order.

    See also: AWS API Documentation.

    :example: response = client.list_data_set_revisions(
        DataSetId='string',
        MaxResults=123,
        NextToken='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results returned by a single call.
    :type NextToken: string
    :param NextToken: The token value retrieved from a previous call to access
        the next page of results.

    :rtype: dict
    :return: A 200 response of the form::

        {
            # Token to pass back in to retrieve the next page of results.
            'NextToken': 'string',
            # The revision objects listed by the request. A revision is a
            # container for one or more assets.
            'Revisions': [
                {
                    'Arn': 'string',
                    'Comment': 'string',
                    'CreatedAt': datetime(2015, 1, 1),
                    'DataSetId': 'string',
                    # A revision must be finalized (read-only) before it can be
                    # published to a product.
                    'Finalized': True|False,
                    'Id': 'string',
                    # ID of the owned revision corresponding to this entitled
                    # revision; returned when an owner views the entitled copy.
                    'SourceId': 'string',
                    'UpdatedAt': datetime(2015, 1, 1)
                },
            ]
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def list_data_sets(MaxResults=None, NextToken=None, Origin=None):
    """
    List your data sets.

    When listing by origin OWNED, results are sorted by CreatedAt in
    descending order. When listing by origin ENTITLED, there is no order and
    the MaxResults parameter is ignored.

    See also: AWS API Documentation.

    :example: response = client.list_data_sets(
        MaxResults=123,
        NextToken='string',
        Origin='string'
    )

    :type MaxResults: integer
    :param MaxResults: The maximum number of results returned by a single call.
    :type NextToken: string
    :param NextToken: The token value retrieved from a previous call to access
        the next page of results.
    :type Origin: string
    :param Origin: A property that defines the data set as OWNED by the
        account (for providers) or ENTITLED to the account (for subscribers).

    :rtype: dict
    :return: A 200 response of the form::

        {
            # The data set objects listed by the request. A data set is an AWS
            # resource with one or more revisions.
            'DataSets': [
                {
                    'Arn': 'string',                    # The ARN for the data set.
                    # Currently the only supported asset type is S3_SNAPSHOT.
                    'AssetType': 'S3_SNAPSHOT',
                    'CreatedAt': datetime(2015, 1, 1),  # ISO 8601 format.
                    'Description': 'string',
                    'Id': 'string',
                    'Name': 'string',
                    'Origin': 'OWNED'|'ENTITLED',
                    # Present when the data set origin is ENTITLED; identifies
                    # the product on AWS Marketplace.
                    'OriginDetails': {
                        'ProductId': 'string'
                    },
                    # ID of the owned data set corresponding to this entitled
                    # data set; returned when an owner views the entitled copy.
                    'SourceId': 'string',
                    'UpdatedAt': datetime(2015, 1, 1)   # ISO 8601 format.
                },
            ],
            # Token to pass back in to retrieve the next page of results.
            'NextToken': 'string'
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def list_jobs(DataSetId=None, MaxResults=None, NextToken=None, RevisionId=None):
    """
    List your jobs, sorted by CreatedAt in descending order.

    AWS Data Exchange jobs are asynchronous import or export operations used
    to create or copy assets. A data set owner can both import and export;
    someone with an entitlement to a data set can only export. Jobs are
    deleted 90 days after they are created.

    See also: AWS API Documentation.

    :example: response = client.list_jobs(
        DataSetId='string',
        MaxResults=123,
        NextToken='string',
        RevisionId='string'
    )

    :type DataSetId: string
    :param DataSetId: The unique identifier for a data set.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results returned by a single call.
    :type NextToken: string
    :param NextToken: The token value retrieved from a previous call to access
        the next page of results.
    :type RevisionId: string
    :param RevisionId: The unique identifier for a revision.

    :rtype: dict
    :return: A 200 response of the form::

        {
            # The job objects listed by the request; each has the same shape
            # as the get_job response.
            'Jobs': [
                {
                    'Arn': 'string',
                    'CreatedAt': datetime(2015, 1, 1),  # ISO 8601 format.
                    # Details of the operation to be performed by the job, such
                    # as export destination or import source details.
                    'Details': {
                        'ExportAssetToSignedUrl': {
                            'AssetId': 'string',
                            'DataSetId': 'string',
                            'RevisionId': 'string',
                            'SignedUrl': 'string',
                            'SignedUrlExpiresAt': datetime(2015, 1, 1)
                        },
                        'ExportAssetsToS3': {
                            'AssetDestinations': [
                                {'AssetId': 'string', 'Bucket': 'string', 'Key': 'string'},
                            ],
                            'DataSetId': 'string',
                            'Encryption': {
                                # KmsKeyArn is the ARN of the AWS KMS key used
                                # to encrypt the S3 objects; required when Type
                                # is aws:kms.
                                'KmsKeyArn': 'string',
                                'Type': 'aws:kms'|'AES256'
                            },
                            'RevisionId': 'string'
                        },
                        'ImportAssetFromSignedUrl': {
                            'AssetName': 'string',
                            'DataSetId': 'string',
                            'Md5Hash': 'string',  # Base64-encoded MD5 of the file.
                            'RevisionId': 'string',
                            'SignedUrl': 'string',
                            'SignedUrlExpiresAt': datetime(2015, 1, 1)
                        },
                        'ImportAssetsFromS3': {
                            'AssetSources': [
                                {'Bucket': 'string', 'Key': 'string'},
                            ],
                            'DataSetId': 'string',
                            'RevisionId': 'string'
                        }
                    },
                    'Errors': [
                        {
                            'Code': 'ACCESS_DENIED_EXCEPTION'|'INTERNAL_SERVER_EXCEPTION'|'MALWARE_DETECTED'|'RESOURCE_NOT_FOUND_EXCEPTION'|'SERVICE_QUOTA_EXCEEDED_EXCEPTION'|'VALIDATION_EXCEPTION'|'MALWARE_SCAN_ENCRYPTED_FILE',
                            'Details': {
                                'ImportAssetFromSignedUrlJobErrorDetails': {
                                    'AssetName': 'string'
                                },
                                'ImportAssetsFromS3JobErrorDetails': [
                                    {'Bucket': 'string', 'Key': 'string'},
                                ]
                            },
                            'LimitName': 'Assets per revision'|'Asset size in GB',
                            'LimitValue': 123.0,
                            'Message': 'string',
                            'ResourceId': 'string',
                            'ResourceType': 'REVISION'|'ASSET'
                        },
                    ],
                    'Id': 'string',
                    'State': 'WAITING'|'IN_PROGRESS'|'ERROR'|'COMPLETED'|'CANCELLED'|'TIMED_OUT',
                    'Type': 'IMPORT_ASSETS_FROM_S3'|'IMPORT_ASSET_FROM_SIGNED_URL'|'EXPORT_ASSETS_TO_S3'|'EXPORT_ASSET_TO_SIGNED_URL',
                    'UpdatedAt': datetime(2015, 1, 1)   # ISO 8601 format.
                },
            ],
            # Token to pass back in to retrieve the next page of results.
            'NextToken': 'string'
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def list_revision_assets(DataSetId=None, MaxResults=None, NextToken=None, RevisionId=None):
    """
    List a revision's assets, sorted alphabetically in descending order.

    See also: AWS API Documentation.

    :example: response = client.list_revision_assets(
        DataSetId='string',
        MaxResults=123,
        NextToken='string',
        RevisionId='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results returned by a single call.
    :type NextToken: string
    :param NextToken: The token value retrieved from a previous call to access
        the next page of results.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    :rtype: dict
    :return: A 200 response of the form::

        {
            # The asset objects listed by the request. An asset in AWS Data
            # Exchange is a piece of data stored as an S3 object; an import
            # job creates one asset per imported file.
            'Assets': [
                {
                    'Arn': 'string',                    # The ARN for the asset.
                    'AssetDetails': {
                        'S3SnapshotAsset': {
                            'Size': 123.0               # Size of the S3 object.
                        }
                    },
                    # Currently the only supported asset type is S3_SNAPSHOT.
                    'AssetType': 'S3_SNAPSHOT',
                    'CreatedAt': datetime(2015, 1, 1),  # ISO 8601 format.
                    'DataSetId': 'string',
                    'Id': 'string',
                    # When importing from Amazon S3, the S3 object key is used
                    # as the asset name; when exporting, the asset name is the
                    # default target S3 object key.
                    'Name': 'string',
                    'RevisionId': 'string',
                    # ID of the owned asset corresponding to this entitled
                    # asset; returned when an owner views the entitled copy.
                    'SourceId': 'string',
                    'UpdatedAt': datetime(2015, 1, 1)   # ISO 8601 format.
                },
            ],
            # Token to pass back in to retrieve the next page of results.
            'NextToken': 'string'
        }

    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    """
    pass
def list_tags_for_resource(ResourceArn=None):
    """
    List the tags on the resource.

    See also: AWS API Documentation.

    :example: response = client.list_tags_for_resource(
        ResourceArn='string'
    )

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] An Amazon Resource Name (ARN) that uniquely
        identifies an AWS resource.

    :rtype: dict
    :return: A 200 response of the form::

        {
            # A label that consists of a customer-defined key and an optional
            # value.
            'Tags': {
                'string': 'string'
            }
        }
    """
    pass
def start_job(JobId=None):
    """
    Start a job.

    See also: AWS API Documentation.

    :example: response = client.start_job(
        JobId='string'
    )

    :type JobId: string
    :param JobId: [REQUIRED] The unique identifier for a job.

    :rtype: dict
    :return: An empty dict (``{}``) on a 202 response.

    :raises DataExchange.Client.exceptions.ValidationException:
    :raises DataExchange.Client.exceptions.InternalServerException:
    :raises DataExchange.Client.exceptions.AccessDeniedException:
    :raises DataExchange.Client.exceptions.ResourceNotFoundException:
    :raises DataExchange.Client.exceptions.ThrottlingException:
    :raises DataExchange.Client.exceptions.ConflictException:
    """
    pass
def tag_resource(ResourceArn=None, Tags=None):
    """
    Tag a resource.

    See also: AWS API Documentation.

    :example: response = client.tag_resource(
        ResourceArn='string',
        Tags={
            'string': 'string'
        }
    )

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] An Amazon Resource Name (ARN) that uniquely
        identifies an AWS resource.
    :type Tags: dict
    :param Tags: [REQUIRED] A label that consists of a customer-defined key
        and an optional value, as string-to-string pairs.
    """
    pass
def untag_resource(ResourceArn=None, TagKeys=None):
    """
    Remove one or more tags from a resource.

    See also: AWS API Documentation.

    :example: response = client.untag_resource(
        ResourceArn='string',
        TagKeys=[
            'string',
        ]
    )

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] An Amazon Resource Name (ARN) that uniquely
        identifies an AWS resource.
    :type TagKeys: list
    :param TagKeys: [REQUIRED] The key tags, as a list of strings.
    """
    pass
def update_asset(AssetId=None, DataSetId=None, Name=None, RevisionId=None):
    """
    This operation updates an asset.

    See also: AWS API Documentation

    :example: response = client.update_asset(
        AssetId='string',
        DataSetId='string',
        Name='string',
        RevisionId='string'
    )

    :type AssetId: string
    :param AssetId: [REQUIRED] The unique identifier for an asset.
    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type Name: string
    :param Name: [REQUIRED] The name of the asset. When importing from
        Amazon S3, the S3 object key is used as the asset name. When
        exporting to Amazon S3, the asset name is used as default target
        S3 object key.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    :rtype: dict
    :return: {
        'Arn': 'string',                     # the ARN for the asset
        'AssetDetails': {
            'S3SnapshotAsset': {             # the S3 object that is the asset
                'Size': 123.0                # size of the S3 object (float)
            }
        },
        'AssetType': 'S3_SNAPSHOT',          # currently the only supported asset type
        'CreatedAt': datetime(2015, 1, 1),   # creation time, ISO 8601 format
        'DataSetId': 'string',               # data set associated with this asset
        'Id': 'string',                      # unique identifier for the asset
        'Name': 'string',                    # asset name (see Name parameter above)
        'RevisionId': 'string',              # revision associated with this asset
        'SourceId': 'string',                # owned-asset ID; returned when an asset
                                             # owner views the entitled copy
        'UpdatedAt': datetime(2015, 1, 1)    # last-update time, ISO 8601 format
    }

    Raises:
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ConflictException
    """
    pass
def update_data_set(DataSetId=None, Description=None, Name=None):
    """
    This operation updates a data set.

    See also: AWS API Documentation

    :example: response = client.update_data_set(
        DataSetId='string',
        Description='string',
        Name='string'
    )

    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type Description: string
    :param Description: The description for the data set.
    :type Name: string
    :param Name: The name of the data set.

    :rtype: dict
    :return: {
        'Arn': 'string',                     # the ARN for the data set
        'AssetType': 'S3_SNAPSHOT',          # currently the only supported asset type
        'CreatedAt': datetime(2015, 1, 1),   # creation time, ISO 8601 format
        'Description': 'string',             # description for the data set
        'Id': 'string',                      # unique identifier for the data set
        'Name': 'string',                    # name of the data set
        'Origin': 'OWNED'|'ENTITLED',        # OWNED by the account (providers) or
                                             # ENTITLED to the account (subscribers)
        'OriginDetails': {                   # present when Origin is ENTITLED:
            'ProductId': 'string'            # the AWS Marketplace product
        },
        'SourceId': 'string',                # owned-data-set ID; returned when a data
                                             # set owner views the entitled copy
        'UpdatedAt': datetime(2015, 1, 1)    # last-update time, ISO 8601 format
    }

    Raises:
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
    """
    pass
def update_revision(Comment=None, DataSetId=None, Finalized=None, RevisionId=None):
    """
    This operation updates a revision.

    See also: AWS API Documentation

    :example: response = client.update_revision(
        Comment='string',
        DataSetId='string',
        Finalized=True|False,
        RevisionId='string'
    )

    :type Comment: string
    :param Comment: An optional comment about the revision.
    :type DataSetId: string
    :param DataSetId: [REQUIRED] The unique identifier for a data set.
    :type Finalized: boolean
    :param Finalized: Finalizing a revision tells AWS Data Exchange that
        your changes to the assets in the revision are complete. After
        it's in this read-only state, you can publish the revision to
        your products.
    :type RevisionId: string
    :param RevisionId: [REQUIRED] The unique identifier for a revision.

    :rtype: dict
    :return: {
        'Arn': 'string',                     # the ARN for the revision
        'Comment': 'string',                 # optional comment about the revision
        'CreatedAt': datetime(2015, 1, 1),   # creation time, ISO 8601 format
        'DataSetId': 'string',               # data set associated with this revision
        'Finalized': True|False,             # read-only once finalized; finalized
                                             # revisions can be published via the AWS
                                             # Data Exchange console or the AWS
                                             # Marketplace Catalog API (StartChangeSet),
                                             # where revisions are identified by ARN
        'Id': 'string',                      # unique identifier for the revision
        'SourceId': 'string',                # owned-revision ID; returned when a
                                             # revision owner views the entitled copy
        'UpdatedAt': datetime(2015, 1, 1)    # last-update time, ISO 8601 format
    }

    Raises:
        DataExchange.Client.exceptions.ValidationException
        DataExchange.Client.exceptions.InternalServerException
        DataExchange.Client.exceptions.AccessDeniedException
        DataExchange.Client.exceptions.ResourceNotFoundException
        DataExchange.Client.exceptions.ThrottlingException
        DataExchange.Client.exceptions.ConflictException
    """
    pass
| 28.554002
| 2,701
| 0.629564
| 9,089
| 81,693
| 5.624381
| 0.052701
| 0.028521
| 0.062989
| 0.029695
| 0.91919
| 0.903404
| 0.889163
| 0.884351
| 0.882062
| 0.873083
| 0
| 0.014695
| 0.275275
| 81,693
| 2,860
| 2,702
| 28.563986
| 0.848746
| 0.972862
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
45591fb5a391b212b2d77aa545b08af2dd457d22
| 163
|
py
|
Python
|
src/codeacademy/cars.py
|
mketiku/python-tutorials
|
57f05ed78d5391b0c551c7e064a1b2f4304a3c82
|
[
"MIT"
] | 1
|
2022-03-30T00:22:21.000Z
|
2022-03-30T00:22:21.000Z
|
src/codeacademy/cars.py
|
mketiku/python-tutorials
|
57f05ed78d5391b0c551c7e064a1b2f4304a3c82
|
[
"MIT"
] | null | null | null |
src/codeacademy/cars.py
|
mketiku/python-tutorials
|
57f05ed78d5391b0c551c7e064a1b2f4304a3c82
|
[
"MIT"
] | null | null | null |
# Tutorial script: two ways of iterating over tuples of car names.
MyGarage = ("Ferrari", "Toyota", "Honda")
# Bug fix: SamsGarage was referenced below but never defined, so the
# script crashed with a NameError before printing anything.
SamsGarage = ("Tesla", "Civic")

# Iterating a tuple OF tuples: prints each garage as a whole tuple.
for each_car in MyGarage, SamsGarage:
    print(each_car)

# Concatenating the tuples first: prints each individual car name.
for each_car in MyGarage + SamsGarage:
    print(each_car)
| 23.285714
| 41
| 0.699387
| 22
| 163
| 5
| 0.454545
| 0.254545
| 0.181818
| 0.218182
| 0.763636
| 0.763636
| 0.763636
| 0.763636
| 0.763636
| 0
| 0
| 0
| 0.196319
| 163
| 6
| 42
| 27.166667
| 0.839695
| 0
| 0
| 0.4
| 0
| 0
| 0.110429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4594aa5e1d5140e7daf1f5d4c10e70e5b91144aa
| 4,607
|
py
|
Python
|
torpido/wavelet/wavelets/sym16.py
|
AP-Atul/Torpido
|
a646b4d6de7f2e2c96de4c64ce3113f53e3931c2
|
[
"Unlicense"
] | 21
|
2020-12-23T07:13:10.000Z
|
2022-01-12T10:32:22.000Z
|
wavelet/wavelets/sym16.py
|
AP-Atul/wavelets-ext
|
00ced22462c369584ebd32f9b5f357f092de0142
|
[
"MIT"
] | 2
|
2020-12-30T10:45:42.000Z
|
2021-09-25T09:52:00.000Z
|
wavelet/wavelets/sym16.py
|
AP-Atul/wavelets-ext
|
00ced22462c369584ebd32f9b5f357f092de0142
|
[
"MIT"
] | 1
|
2021-02-06T21:39:41.000Z
|
2021-02-06T21:39:41.000Z
|
""" Symlet 16 wavelet """
class Symlet16:
    """
    Properties
    ----------
    near symmetric, orthogonal, biorthogonal
    All values are from http://wavelets.pybytes.com/wavelet/sym16/
    """
    __name__ = "Symlet Wavelet 16"
    __motherWaveletLength__ = 32  # length of the mother wavelet
    __transformWaveletLength__ = 2  # minimum wavelength of input signal

    # decomposition filter, low-pass: the 32 mother-wavelet coefficients.
    # The other three filter banks below are derived from this one, so the
    # coefficient table only has to be maintained in a single place.
    decompositionLowFilter = [
        6.230006701220761e-06,
        -3.113556407621969e-06,
        -0.00010943147929529757,
        2.8078582128442894e-05,
        0.0008523547108047095,
        -0.0001084456223089688,
        -0.0038809122526038786,
        0.0007182119788317892,
        0.012666731659857348,
        -0.0031265171722710075,
        -0.031051202843553064,
        0.004869274404904607,
        0.032333091610663785,
        -0.06698304907021778,
        -0.034574228416972504,
        0.39712293362064416,
        0.7565249878756971,
        0.47534280601152273,
        -0.054040601387606135,
        -0.15959219218520598,
        0.03072113906330156,
        0.07803785290341991,
        -0.003510275068374009,
        -0.024952758046290123,
        0.001359844742484172,
        0.0069377611308027096,
        -0.00022211647621176323,
        -0.0013387206066921965,
        3.656592483348223e-05,
        0.00016545679579108483,
        -5.396483179315242e-06,
        -1.0797982104319795e-05,
    ]

    # decomposition filter, high-pass: the quadrature mirror of the
    # low-pass filter — reverse the coefficients and flip the sign of
    # every even-indexed entry. Negation and reversal are exact float
    # operations, so these values are bit-identical to the published table.
    decompositionHighFilter = [
        coefficient if index % 2 else -coefficient
        for index, coefficient in enumerate(reversed(decompositionLowFilter))
    ]

    # reconstruction filters are the time-reversed decomposition filters.
    # low-pass
    reconstructionLowFilter = decompositionLowFilter[::-1]
    # high-pass
    reconstructionHighFilter = decompositionHighFilter[::-1]
| 28.614907
| 72
| 0.623833
| 334
| 4,607
| 8.568862
| 0.245509
| 0.008386
| 0.026555
| 0.02935
| 0.859539
| 0.859539
| 0.859539
| 0.859539
| 0.859539
| 0.859539
| 0
| 0.746264
| 0.288257
| 4,607
| 160
| 73
| 28.79375
| 0.126563
| 0.063599
| 0
| 0.914286
| 0
| 0
| 0.003978
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
45cab71be33d658b084e8f81f4d3901bd0c7dae6
| 206
|
py
|
Python
|
model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/utils/log.py
|
NickSchoelkopf/SummerTime
|
9a89aab8e1544e3c52c043b9c47ab325e665e11e
|
[
"Apache-2.0"
] | 178
|
2021-07-07T23:46:20.000Z
|
2022-03-26T17:47:21.000Z
|
model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/utils/log.py
|
NickSchoelkopf/SummerTime
|
9a89aab8e1544e3c52c043b9c47ab325e665e11e
|
[
"Apache-2.0"
] | 77
|
2021-06-18T21:44:53.000Z
|
2022-02-20T00:23:06.000Z
|
model/third_party/HMNet/ThirdParty/ROUGE/pyrouge/utils/log.py
|
NickSchoelkopf/SummerTime
|
9a89aab8e1544e3c52c043b9c47ab325e665e11e
|
[
"Apache-2.0"
] | 19
|
2021-06-18T22:24:47.000Z
|
2022-03-16T12:53:50.000Z
|
import logging
def get_console_logger(name, level=logging.WARNING):
    """Return the shared "pyrouge" logger.

    NOTE(review): ``name`` and ``level`` are accepted for API
    compatibility but are ignored — every call returns the same
    "pyrouge" logger regardless of the arguments.
    """
    logger = logging.getLogger("pyrouge")
    return logger
def get_global_console_logger(level=logging.WARNING):
    """Return the shared "pyrouge" logger.

    NOTE(review): ``level`` is accepted for API compatibility but is
    ignored — the logger is returned with whatever configuration it
    already has.
    """
    logger = logging.getLogger("pyrouge")
    return logger
| 20.6
| 53
| 0.786408
| 26
| 206
| 6.038462
| 0.5
| 0.076433
| 0.242038
| 0.318471
| 0.611465
| 0.611465
| 0.611465
| 0
| 0
| 0
| 0
| 0
| 0.11165
| 206
| 9
| 54
| 22.888889
| 0.857924
| 0
| 0
| 0.4
| 0
| 0
| 0.067961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
45df4ea695e4bc0eeed1e7c57bfb7e0dfb88369b
| 51,248
|
py
|
Python
|
pyredis/commands.py
|
adityagrg/pyredis-Python2
|
136b40062152599630171c1002b95a13135b8059
|
[
"MIT"
] | null | null | null |
pyredis/commands.py
|
adityagrg/pyredis-Python2
|
136b40062152599630171c1002b95a13135b8059
|
[
"MIT"
] | null | null | null |
pyredis/commands.py
|
adityagrg/pyredis-Python2
|
136b40062152599630171c1002b95a13135b8059
|
[
"MIT"
] | null | null | null |
__author__ = u'adityagrg'

# Public API of this module.
# Fix: u'Geo' was missing from __all__ even though the Geo command
# class is defined in this module alongside the others listed here.
__all__ = [
    u'Connection',
    u'Geo',
    u'Hash',
    u'HyperLogLog',
    u'Key',
    u'List',
    u'Publish',
    u'Scripting',
    u'Set',
    u'SSet',
    u'String',
    u'Subscribe',
    u'Transaction'
]
class BaseCommand(object):
    """Common base for the Redis command mix-in classes.

    Subclasses call :meth:`execute` to dispatch a command; a concrete
    client overrides it. ``_cluster`` is False by default and is set to
    True by cluster clients so commands route by shard key.
    """

    def __init__(self):
        # Cluster-aware clients flip this to True; the command methods
        # then pass shard_key/sock routing hints to execute().
        self._cluster = False

    def execute(self, *args, **kwargs):
        """Dispatch a command; must be overridden by a concrete client.

        :raises NotImplementedError: always, on the base class.
        """
        # Bug fix: the original `raise NotImplemented` raised a TypeError
        # in Python 3, because NotImplemented is a sentinel value, not an
        # exception class. Raise the proper exception type instead.
        raise NotImplementedError
class Connection(BaseCommand):
    """Redis connection commands (ECHO, PING)."""

    def __init__(self):
        super(Connection, self).__init__()

    def echo(self, *args, shard_key=None, sock=None):
        u"""Run the Redis ECHO command; see the Redis docs for details.

        :param shard_key: (optional) Key name to route by when used with
            a Cluster Client. Mutually exclusive with sock.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379". Mutually
            exclusive with shard_key; Cluster Client only.
        :type sock: string
        :return: result, exception
        """
        if not self._cluster:
            return self.execute(u'ECHO', *args)
        return self.execute(u'ECHO', *args, shard_key=shard_key, sock=sock)

    def ping(self, shard_key=None, sock=None):
        u"""Run the Redis PING command; see the Redis docs for details.

        :param shard_key: (optional) Key name to route by when used with
            a Cluster Client. Mutually exclusive with sock.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379". Mutually
            exclusive with shard_key; Cluster Client only.
        :type sock: string
        :return: result, exception
        """
        if not self._cluster:
            return self.execute(u'PING')
        return self.execute(u'PING', shard_key=shard_key, sock=sock)
class Geo(BaseCommand):
    """Redis geospatial commands (GEOADD, GEODIST, ...)."""

    def __init__(self):
        super(Geo, self).__init__()

    def _dispatch(self, command, args):
        # All geo commands take the key first; in cluster mode route by it.
        if self._cluster:
            return self.execute(command, *args, shard_key=args[0])
        return self.execute(command, *args)

    def geoadd(self, *args):
        u"""Run the Redis GEOADD command; see the Redis docs for details.

        :return: result, exception
        """
        return self._dispatch(u'GEOADD', args)

    def geodist(self, *args):
        u"""Run the Redis GEODIST command; see the Redis docs for details.

        :return: result, exception
        """
        return self._dispatch(u'GEODIST', args)

    def geohash(self, *args):
        u"""Run the Redis GEOHASH command; see the Redis docs for details.

        :return: result, exception
        """
        return self._dispatch(u'GEOHASH', args)

    def georadius(self, *args):
        u"""Run the Redis GEORADIUS command; see the Redis docs for details.

        :return: result, exception
        """
        return self._dispatch(u'GEORADIUS', args)

    def geopos(self, *args):
        u"""Run the Redis GEOPOS command; see the Redis docs for details.

        :return: result, exception
        """
        return self._dispatch(u'GEOPOS', args)

    def georadiusbymember(self, *args):
        u"""Run the Redis GEORADIUSBYMEMBER command; see the Redis docs.

        :return: result, exception
        """
        return self._dispatch(u'GEORADIUSBYMEMBER', args)
class Key(BaseCommand):
    """Redis key commands (DEL, EXPIRE, TTL, ...).

    Bug fixes relative to the previous revision:
    * expireat(): the cluster branch dropped the command arguments and
      the shard key, sending a bare EXPIREAT to the server.
    * object(): the cluster branch sent DEL instead of OBJECT.
    * migrate(): raised the NotImplemented sentinel (a TypeError in
      Python 3) instead of NotImplementedError.
    """

    def __init__(self):
        super(Key, self).__init__()

    def _run_keyed(self, command, args):
        # Single-key commands: in cluster mode, route by the first argument.
        if self._cluster:
            return self.execute(command, *args, shard_key=args[0])
        return self.execute(command, *args)

    def _run_routed(self, command, args, shard_key, sock):
        # Keyless commands: the caller picks the target node explicitly
        # via shard_key or sock (cluster clients only).
        if self._cluster:
            return self.execute(command, *args, shard_key=shard_key, sock=sock)
        return self.execute(command, *args)

    def delete(self, *args):
        u""" Execute DEL Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'DEL', args)

    def dump(self, *args):
        u""" Execute DUMP Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'DUMP', args)

    def exists(self, *args):
        u""" Execute EXISTS Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'EXISTS', args)

    def expire(self, *args):
        u""" Execute EXPIRE Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'EXPIRE', args)

    def expireat(self, *args):
        u""" Execute EXPIREAT Command, consult Redis documentation for details.

        :return: result, exception
        """
        # Fixed: previously the cluster branch called
        # self.execute(u'EXPIREAT') with no arguments and no shard key.
        return self._run_keyed(u'EXPIREAT', args)

    def keys(self, *args, shard_key=None, sock=None):
        u""" Execute KEYS Command, consult Redis documentation for details.

        :param shard_key: (optional) Key name to route by; mutually
            exclusive with sock. Cluster clients only.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379"; mutually
            exclusive with shard_key. Cluster clients only.
        :type sock: string
        :return: result, exception
        """
        return self._run_routed(u'KEYS', args, shard_key, sock)

    def migrate(self, *args):
        u""" Execute MIGRATE Command, consult Redis documentation for details.

        :return: result, exception
        :raises NotImplementedError: when used with a Cluster Client.
        """
        if self._cluster:
            # MIGRATE is not supported on cluster clients.
            raise NotImplementedError
        return self.execute(u'MIGRATE', *args)

    def move(self, *args):
        u""" Execute MOVE Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'MOVE', args)

    def object(self, *args, shard_key=None, sock=None):
        u""" Execute OBJECT Command, consult Redis documentation for details.

        :param shard_key: (optional) Key name to route by; mutually
            exclusive with sock. Cluster clients only.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379"; mutually
            exclusive with shard_key. Cluster clients only.
        :type sock: string
        :return: result, exception
        """
        # Fixed: the cluster branch previously sent DEL instead of OBJECT.
        return self._run_routed(u'OBJECT', args, shard_key, sock)

    def persist(self, *args):
        u""" Execute PERSIST Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'PERSIST', args)

    def pexpire(self, *args):
        u""" Execute PEXPIRE Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'PEXPIRE', args)

    def pexpireat(self, *args):
        u""" Execute PEXPIREAT Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'PEXPIREAT', args)

    def pttl(self, *args):
        u""" Execute PTTL Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'PTTL', args)

    def randomkey(self, *args, shard_key=None, sock=None):
        u""" Execute RANDOMKEY Command, consult Redis documentation for details.

        :param shard_key: (optional) Key name to route by; mutually
            exclusive with sock. Cluster clients only.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379"; mutually
            exclusive with shard_key. Cluster clients only.
        :type sock: string
        :return: result, exception
        """
        return self._run_routed(u'RANDOMKEY', args, shard_key, sock)

    def rename(self, *args):
        u""" Execute RENAME Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'RENAME', args)

    def renamenx(self, *args):
        u""" Execute RENAMENX Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'RENAMENX', args)

    def restore(self, *args):
        u""" Execute RESTORE Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'RESTORE', args)

    def scan(self, *args, shard_key=None, sock=None):
        u""" Execute SCAN Command, consult Redis documentation for details.

        :param shard_key: (optional) Key name to route by; mutually
            exclusive with sock. Cluster clients only.
        :type shard_key: string
        :param sock: (optional) String representation of the socket the
            command should run against, e.g. "testhost_6379"; mutually
            exclusive with shard_key. Cluster clients only.
        :type sock: string
        :return: result, exception
        """
        return self._run_routed(u'SCAN', args, shard_key, sock)

    def sort(self, *args):
        u""" Execute SORT Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'SORT', args)

    def ttl(self, *args):
        u""" Execute TTL Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'TTL', args)

    def type(self, *args):
        u""" Execute TYPE Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'TYPE', args)

    def wait(self, *args):
        u""" Execute WAIT Command, consult Redis documentation for details.

        :return: result, exception
        """
        return self._run_keyed(u'WAIT', args)
class String(BaseCommand):
    """Redis string commands (GET, SET, INCR, bit operations, ...)."""

    def __init__(self):
        super(String, self).__init__()

    def _run(self, command, args, key_index=0):
        # In cluster mode, route the command by the key at ``key_index``
        # (the first argument for every command except BITOP).
        if self._cluster:
            return self.execute(command, *args, shard_key=args[key_index])
        return self.execute(command, *args)

    def append(self, *args):
        u"""Run the Redis APPEND command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'APPEND', args)

    def bitcount(self, *args):
        u"""Run the Redis BITCOUNT command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'BITCOUNT', args)

    def bitfield(self, *args):
        u"""Run the Redis BITFIELD command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'BITFIELD', args)

    def bitop(self, *args):
        u"""Run the Redis BITOP command; see the Redis docs for details.

        :return: result, exception
        """
        # BITOP's routing key is the destination key, which comes after
        # the operation name — hence key_index=1.
        return self._run(u'BITOP', args, key_index=1)

    def bitpos(self, *args):
        u"""Run the Redis BITPOS command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'BITPOS', args)

    def decr(self, *args):
        u"""Run the Redis DECR command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'DECR', args)

    def decrby(self, *args):
        u"""Run the Redis DECRBY command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'DECRBY', args)

    def get(self, *args):
        u"""Run the Redis GET command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'GET', args)

    def getbit(self, *args):
        u"""Run the Redis GETBIT command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'GETBIT', args)

    def getrange(self, *args):
        u"""Run the Redis GETRANGE command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'GETRANGE', args)

    def getset(self, *args):
        u"""Run the Redis GETSET command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'GETSET', args)

    def incr(self, *args):
        u"""Run the Redis INCR command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'INCR', args)

    def incrby(self, *args):
        u"""Run the Redis INCRBY command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'INCRBY', args)

    def incrbyfloat(self, *args):
        u"""Run the Redis INCRBYFLOAT command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'INCRBYFLOAT', args)

    def mget(self, *args):
        u"""Run the Redis MGET command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'MGET', args)

    def mset(self, *args):
        u"""Run the Redis MSET command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'MSET', args)

    def msetnx(self, *args):
        u"""Run the Redis MSETNX command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'MSETNX', args)

    def psetex(self, *args):
        u"""Run the Redis PSETEX command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'PSETEX', args)

    def set(self, *args):
        u"""Run the Redis SET command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'SET', args)

    def setbit(self, *args):
        u"""Run the Redis SETBIT command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'SETBIT', args)

    def setex(self, *args):
        u"""Run the Redis SETEX command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'SETEX', args)

    def setnx(self, *args):
        u"""Run the Redis SETNX command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'SETNX', args)

    def setrange(self, *args):
        u"""Run the Redis SETRANGE command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'SETRANGE', args)

    def strlen(self, *args):
        u"""Run the Redis STRLEN command; see the Redis docs for details.

        :return: result, exception
        """
        return self._run(u'STRLEN', args)
class Hash(BaseCommand):
    u""" Redis hash commands; each method wraps one Redis command. """
    def __init__(self):
        super(Hash, self).__init__()

    def _run(self, command, *args):
        u""" Dispatch *command*; in cluster mode shard on the first argument
        (the key name).
        """
        if not self._cluster:
            return self.execute(command, *args)
        return self.execute(command, *args, shard_key=args[0])

    def hdel(self, *args):
        u""" Run the Redis HDEL command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HDEL', *args)

    def hexists(self, *args):
        u""" Run the Redis HEXISTS command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HEXISTS', *args)

    def hget(self, *args):
        u""" Run the Redis HGET command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HGET', *args)

    def hgetall(self, *args):
        u""" Run the Redis HGETALL command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HGETALL', *args)

    def hincrby(self, *args):
        u""" Run the Redis HINCRBY command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HINCRBY', *args)

    def hincrbyfloat(self, *args):
        u""" Run the Redis HINCRBYFLOAT command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HINCRBYFLOAT', *args)

    def hkeys(self, *args):
        u""" Run the Redis HKEYS command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HKEYS', *args)

    def hlen(self, *args):
        u""" Run the Redis HLEN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HLEN', *args)

    def hmget(self, *args):
        u""" Run the Redis HMGET command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HMGET', *args)

    def hmset(self, *args):
        u""" Run the Redis HMSET command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HMSET', *args)

    def hset(self, *args):
        u""" Run the Redis HSET command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HSET', *args)

    def hsetnx(self, *args):
        u""" Run the Redis HSETNX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HSETNX', *args)

    def hstrlen(self, *args):
        u""" Run the Redis HSTRLEN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HSTRLEN', *args)

    def hvals(self, *args):
        u""" Run the Redis HVALS command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HVALS', *args)

    def hscan(self, *args):
        u""" Run the Redis HSCAN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'HSCAN', *args)
class List(BaseCommand):
    u""" Redis list commands; each method wraps one Redis command. """
    def __init__(self):
        super(List, self).__init__()

    def _run(self, command, *args):
        u""" Dispatch *command*; in cluster mode shard on the first argument
        (the key name).
        """
        if not self._cluster:
            return self.execute(command, *args)
        return self.execute(command, *args, shard_key=args[0])

    def blpop(self, *args):
        u""" Run the Redis BLPOP command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'BLPOP', *args)

    def brpop(self, *args):
        u""" Run the Redis BRPOP command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'BRPOP', *args)

    def brpoplpush(self, *args):
        u""" Run the Redis BRPOPLPUSH command; see the Redis documentation.
        :return: result, exception
        """
        # Bug fix: this method used to send the misspelled command
        # u'BRPOPPUSH', which Redis rejects as an unknown command.
        return self._run(u'BRPOPLPUSH', *args)

    def lindex(self, *args):
        u""" Run the Redis LINDEX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LINDEX', *args)

    def linsert(self, *args):
        u""" Run the Redis LINSERT command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LINSERT', *args)

    def llen(self, *args):
        u""" Run the Redis LLEN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LLEN', *args)

    def lpop(self, *args):
        u""" Run the Redis LPOP command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LPOP', *args)

    def lpush(self, *args):
        u""" Run the Redis LPUSH command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LPUSH', *args)

    def lpushx(self, *args):
        u""" Run the Redis LPUSHX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LPUSHX', *args)

    def lrange(self, *args):
        u""" Run the Redis LRANGE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LRANGE', *args)

    def lrem(self, *args):
        u""" Run the Redis LREM command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LREM', *args)

    def lset(self, *args):
        u""" Run the Redis LSET command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LSET', *args)

    def ltrim(self, *args):
        u""" Run the Redis LTRIM command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'LTRIM', *args)

    def rpop(self, *args):
        u""" Run the Redis RPOP command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'RPOP', *args)

    def rpoplpush(self, *args):
        u""" Run the Redis RPOPLPUSH command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'RPOPLPUSH', *args)

    def rpush(self, *args):
        u""" Run the Redis RPUSH command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'RPUSH', *args)

    def rpushx(self, *args):
        u""" Run the Redis RPUSHX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'RPUSHX', *args)
class Set(BaseCommand):
    u""" Redis set commands; each method wraps one Redis command. """
    def __init__(self):
        super(Set, self).__init__()

    def _run(self, command, *args):
        u""" Dispatch *command*; in cluster mode shard on the first argument
        (the key name).
        """
        if not self._cluster:
            return self.execute(command, *args)
        return self.execute(command, *args, shard_key=args[0])

    def sadd(self, *args):
        u""" Run the Redis SADD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SADD', *args)

    def scard(self, *args):
        u""" Run the Redis SCARD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SCARD', *args)

    def sdiff(self, *args):
        u""" Run the Redis SDIFF command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SDIFF', *args)

    def sdiffstore(self, *args):
        u""" Run the Redis SDIFFSTORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SDIFFSTORE', *args)

    def sinter(self, *args):
        u""" Run the Redis SINTER command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SINTER', *args)

    def sinterstore(self, *args):
        u""" Run the Redis SINTERSTORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SINTERSTORE', *args)

    def sismember(self, *args):
        u""" Run the Redis SISMEMBER command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SISMEMBER', *args)

    def smembers(self, *args):
        u""" Run the Redis SMEMBERS command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SMEMBERS', *args)

    def smove(self, *args):
        u""" Run the Redis SMOVE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SMOVE', *args)

    def spop(self, *args):
        u""" Run the Redis SPOP command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SPOP', *args)

    def srandmember(self, *args):
        u""" Run the Redis SRANDMEMBER command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SRANDMEMBER', *args)

    def srem(self, *args):
        u""" Run the Redis SREM command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SREM', *args)

    def sunion(self, *args):
        u""" Run the Redis SUNION command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SUNION', *args)

    def sunionstore(self, *args):
        u""" Run the Redis SUNIONSTORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SUNIONSTORE', *args)

    # Backward-compatible alias for the historical misspelling of the
    # method name; prefer sunionstore() in new code.
    sunoinstore = sunionstore

    def sscan(self, *args):
        u""" Run the Redis SSCAN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'SSCAN', *args)
class SSet(BaseCommand):
    u""" Redis sorted-set commands; each method wraps one Redis command. """
    def __init__(self):
        super(SSet, self).__init__()

    def _run(self, command, *args):
        u""" Dispatch *command*; in cluster mode shard on the first argument
        (the key name).
        """
        if not self._cluster:
            return self.execute(command, *args)
        return self.execute(command, *args, shard_key=args[0])

    def zadd(self, *args):
        u""" Run the Redis ZADD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZADD', *args)

    def zcard(self, *args):
        u""" Run the Redis ZCARD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZCARD', *args)

    def zcount(self, *args):
        u""" Run the Redis ZCOUNT command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZCOUNT', *args)

    def zincrby(self, *args):
        u""" Run the Redis ZINCRBY command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZINCRBY', *args)

    def zinterstore(self, *args):
        u""" Run the Redis ZINTERSTORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZINTERSTORE', *args)

    def zlexcount(self, *args):
        u""" Run the Redis ZLEXCOUNT command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZLEXCOUNT', *args)

    def zrange(self, *args):
        u""" Run the Redis ZRANGE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZRANGE', *args)

    def zrangebylex(self, *args):
        u""" Run the Redis ZRANGEBYLEX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZRANGEBYLEX', *args)

    def zrangebyscore(self, *args):
        u""" Run the Redis ZRANGEBYSCORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZRANGEBYSCORE', *args)

    def zrank(self, *args):
        u""" Run the Redis ZRANK command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZRANK', *args)

    def zrem(self, *args):
        u""" Run the Redis ZREM command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREM', *args)

    def zremrangebylex(self, *args):
        u""" Run the Redis ZREMRANGEBYLEX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREMRANGEBYLEX', *args)

    def zremrangebyrank(self, *args):
        u""" Run the Redis ZREMRANGEBYRANK command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREMRANGEBYRANK', *args)

    def zremrangebyscore(self, *args):
        u""" Run the Redis ZREMRANGEBYSCORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREMRANGEBYSCORE', *args)

    # Backward-compatible alias for the historical misspelling of the
    # method name; prefer zremrangebyscore() in new code.
    zremrangebyscrore = zremrangebyscore

    def zrevrange(self, *args):
        u""" Run the Redis ZREVRANGE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREVRANGE', *args)

    def zrevrangebylex(self, *args):
        u""" Run the Redis ZREVRANGEBYLEX command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREVRANGEBYLEX', *args)

    def zrevrangebyscore(self, *args):
        u""" Run the Redis ZREVRANGEBYSCORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREVRANGEBYSCORE', *args)

    def zrevrank(self, *args):
        u""" Run the Redis ZREVRANK command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZREVRANK', *args)

    def zscore(self, *args):
        u""" Run the Redis ZSCORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZSCORE', *args)

    def zunionstore(self, *args):
        u""" Run the Redis ZUNIONSTORE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZUNIONSTORE', *args)

    def zscan(self, *args):
        u""" Run the Redis ZSCAN command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'ZSCAN', *args)
class HyperLogLog(BaseCommand):
    u""" Redis HyperLogLog commands; each method wraps one Redis command. """
    def __init__(self):
        super(HyperLogLog, self).__init__()

    def _run(self, command, *args):
        u""" Dispatch *command*; in cluster mode shard on the first argument
        (the key name).
        """
        if not self._cluster:
            return self.execute(command, *args)
        return self.execute(command, *args, shard_key=args[0])

    def pfadd(self, *args):
        u""" Run the Redis PFADD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'PFADD', *args)

    def pfcount(self, *args):
        u""" Run the Redis PFCOUNT command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'PFCOUNT', *args)

    def pfmerge(self, *args):
        u""" Run the Redis PFMERGE command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'PFMERGE', *args)
class Publish(BaseCommand):
    u""" Redis Pub/Sub publish command. """
    def __init__(self):
        super(Publish, self).__init__()

    def publish(self, *args):
        u""" Execute PUBLISH Command, consult Redis documentation for details.
        :return: result, exception
        :raises NotImplementedError: when called on a cluster client
        """
        if self._cluster:
            # Bug fix: `raise NotImplemented` raised a TypeError, because
            # NotImplemented is a comparison sentinel, not an exception.
            raise NotImplementedError
        return self.execute(u'PUBLISH', *args)
class Subscribe(object):
    u""" Base class for Redis Pub/Sub subscribe commands.

    Concrete clients must override write() to actually send the command.
    """
    def write(self, *args):
        u""" Send a raw command to the server; must be overridden.
        :raises NotImplementedError: always, in this base class
        """
        # Bug fix: `raise NotImplemented` raised a TypeError, because
        # NotImplemented is a comparison sentinel, not an exception.
        raise NotImplementedError

    def psubscribe(self, *args):
        u""" Execute PSUBSCRIBE Command, consult Redis documentation for details.
        :return: result, exception
        """
        return self.write(u'PSUBSCRIBE', *args)

    def punsubscribe(self, *args):
        u""" Execute PUNSUBSCRIBE Command, consult Redis documentation for details.
        :return: result, exception
        """
        return self.write(u'PUNSUBSCRIBE', *args)

    def subscribe(self, *args):
        u""" Execute SUBSCRIBE Command, consult Redis documentation for details.
        :return: result, exception
        """
        return self.write(u'SUBSCRIBE', *args)

    def unsubscribe(self, *args):
        u""" Execute UNSUBSCRIBE Command, consult Redis documentation for details.
        :return: result, exception
        """
        return self.write(u'UNSUBSCRIBE', *args)
class Transaction(BaseCommand):
    u""" Redis transaction (MULTI/EXEC) commands. """
    def __init__(self):
        super(Transaction, self).__init__()

    def _run(self, *args, shard_key=None, sock=None):
        u""" Dispatch a command; in cluster mode route via shard_key/sock,
        otherwise ignore the routing keywords.
        """
        if not self._cluster:
            return self.execute(*args)
        return self.execute(*args, shard_key=shard_key, sock=sock)

    def discard(self, *args, shard_key=None, sock=None):
        u""" Run the Redis DISCARD command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'DISCARD', *args, shard_key=shard_key, sock=sock)

    def exec(self, *args, shard_key=None, sock=None):
        u""" Run the Redis EXEC command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'EXEC', *args, shard_key=shard_key, sock=sock)

    def multi(self, *args, shard_key=None, sock=None):
        u""" Run the Redis MULTI command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'MULTI', *args, shard_key=shard_key, sock=sock)

    def unwatch(self, *args, shard_key=None, sock=None):
        u""" Run the Redis UNWATCH command; see the Redis documentation.
        :return: result, exception
        """
        return self._run(u'UNWATCH', *args, shard_key=shard_key, sock=sock)

    def watch(self, *args):
        u""" Run the Redis WATCH command; see the Redis documentation.
        In cluster mode the command is sharded on the first key.
        :return: result, exception
        """
        if not self._cluster:
            return self.execute(u'WATCH', *args)
        return self.execute(u'WATCH', *args, shard_key=args[0])
class Scripting(BaseCommand):
    u""" Redis Lua scripting commands. """
    def __init__(self):
        super(Scripting, self).__init__()

    def _run(self, *args, shard_key=None, sock=None):
        u""" Dispatch a command; in cluster mode route via shard_key/sock,
        otherwise ignore the routing keywords.
        """
        if not self._cluster:
            return self.execute(*args)
        return self.execute(*args, shard_key=shard_key, sock=sock)

    def eval(self, *args, shard_key=None, sock=None):
        u""" Run the Redis EVAL command; see the Redis documentation.
        :param shard_key: (optional) key name to route by; cluster client
            only, mutually exclusive with sock
        :type shard_key: string
        :param sock: (optional) socket representation (e.g. "testhost_6379")
            to run against; cluster client only, mutually exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'EVAL', *args, shard_key=shard_key, sock=sock)

    def evalsha(self, *args, shard_key=None, sock=None):
        u""" Run the Redis EVALSHA command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'EVALSHA', *args, shard_key=shard_key, sock=sock)

    def script_debug(self, *args, shard_key=None, sock=None):
        u""" Run the Redis SCRIPT DEBUG command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'SCRIPT', u'DEBUG', *args, shard_key=shard_key, sock=sock)

    def script_exists(self, *args, shard_key=None, sock=None):
        u""" Run the Redis SCRIPT EXISTS command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'SCRIPT', u'EXISTS', *args, shard_key=shard_key, sock=sock)

    def script_flush(self, *args, shard_key=None, sock=None):
        u""" Run the Redis SCRIPT FLUSH command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'SCRIPT', u'FLUSH', *args, shard_key=shard_key, sock=sock)

    def script_kill(self, *args, shard_key=None, sock=None):
        u""" Run the Redis SCRIPT KILL command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'SCRIPT', u'KILL', *args, shard_key=shard_key, sock=sock)

    def script_load(self, *args, shard_key=None, sock=None):
        u""" Run the Redis SCRIPT LOAD command; see the Redis documentation.
        :param shard_key: (optional) routing key, cluster only; exclusive
            with sock
        :type shard_key: string
        :param sock: (optional) target socket, cluster only; exclusive with
            shard_key
        :type sock: string
        :return: result, exception
        """
        return self._run(u'SCRIPT', u'LOAD', *args, shard_key=shard_key, sock=sock)
| 32.935733
| 92
| 0.606033
| 6,191
| 51,248
| 4.940236
| 0.034728
| 0.090894
| 0.152297
| 0.161256
| 0.836685
| 0.824718
| 0.752624
| 0.751316
| 0.749093
| 0.602648
| 0
| 0.004638
| 0.284831
| 51,248
| 1,555
| 93
| 32.956913
| 0.829855
| 0.370805
| 0
| 0.203679
| 0
| 0
| 0.070453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206307
| false
| 0
| 0
| 0
| 0.590013
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
b3172bc2888915a92d181dfabf2abca5773f2e8e
| 766
|
py
|
Python
|
2016/test_q3.py
|
matthewelse/british-informatics-olympiad
|
4fcc3b264903af01555ad4cd2eb51ea7196f2057
|
[
"MIT"
] | 11
|
2020-11-11T09:28:44.000Z
|
2022-03-01T19:20:39.000Z
|
2016/test_q3.py
|
matthewelse/british-informatics-olympiad
|
4fcc3b264903af01555ad4cd2eb51ea7196f2057
|
[
"MIT"
] | 5
|
2020-11-30T04:06:52.000Z
|
2022-01-04T09:57:07.000Z
|
2016/test_q3.py
|
matthewelse/british-informatics-olympiad
|
4fcc3b264903af01555ad4cd2eb51ea7196f2057
|
[
"MIT"
] | 4
|
2020-12-06T11:07:24.000Z
|
2021-12-31T16:46:51.000Z
|
"""Test cases for 2016 Q3"""
import q3
def test_case0():
    answer = q3.solve(100, 2, 13)
    assert answer == 4
def test_case1():
    answer = q3.solve(20, 2, 3)
    assert answer == 2
def test_case2():
    answer = q3.solve(20, 2, 13)
    assert answer == 4
def test_case3():
    answer = q3.solve(100, 73, 89)
    assert answer == 2
def test_case4():
    answer = q3.solve(100, 19, 97)
    assert answer == 7
def test_case5():
    answer = q3.solve(1000, 3, 971)
    assert answer == 9
def test_case6():
    answer = q3.solve(2000, 977, 997)
    assert answer == 4
def test_case7():
    answer = q3.solve(5000, 83, 3643)
    assert answer == 10
def test_case8():
    answer = q3.solve(614700, 3643, 90149)
    assert answer == 18
def test_case9():
    answer = q3.solve(987654, 3643, 90149)
    assert answer == 16
def test_case10():
    answer = q3.solve(1000000, 2, 968137)
    assert answer == 18
def test_case11():
    answer = q3.solve(1000000, 993851, 995387)
    assert answer == 3
| 19.15
| 49
| 0.62141
| 127
| 766
| 3.653543
| 0.401575
| 0.181034
| 0.336207
| 0.103448
| 0.114224
| 0
| 0
| 0
| 0
| 0
| 0
| 0.27907
| 0.214099
| 766
| 39
| 50
| 19.641026
| 0.491694
| 0.028721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.48
| 1
| 0.48
| true
| 0
| 0.04
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b33d2026da1b575cff3401d186f1ce5814e04bae
| 30,676
|
py
|
Python
|
custompackage/.ipynb_checkpoints/traintestloop-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | 4
|
2021-03-11T21:46:41.000Z
|
2021-12-01T06:32:42.000Z
|
custompackage/.ipynb_checkpoints/traintestloop-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | null | null | null |
custompackage/.ipynb_checkpoints/traintestloop-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | 1
|
2021-08-12T19:32:37.000Z
|
2021-08-12T19:32:37.000Z
|
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn
import numpy as np
import math
import time
from torch.optim.optimizer import required
from torch.utils.data.dataset import random_split
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import Optimizer
from pytorchtools import EarlyStopping
def train_test_ktree(model, trainloader, validloader, testloader, epochs=10, randorder=False, patience=60):
    '''
    Trains and tests k-tree models with early stopping.
    Inputs: model, trainloader, validloader, testloader, epochs, randorder, patience
    Outputs: train loss_curve, train acc_curve, test ave_loss, test accuracy,
             trained model (and the pixel ordering when randorder == True)
    '''
    # Initialize loss function and optimizer.
    # BCEWithLogitsLoss combines the sigmoid with BCE, so the model is
    # expected to output raw logits (accuracy below thresholds at 0).
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # to track training loss and accuracy as the model trains
    loss_curve = []
    acc_curve = []

    # if randorder == True, generate the randomizer index array for
    # randomizing the input image pixel order
    if randorder == True:
        ordering = torch.randperm(len(trainloader.dataset.tensors[0][0]))

    # Initialize early stopping object
    early_stopping = EarlyStopping(patience=patience, verbose=False)

    for epoch in range(epochs):  # loop over the dataset multiple times
        ######################
        # train the model    #
        ######################
        running_loss = 0.0
        running_acc = 0.0
        model.train()
        for i, data in enumerate(trainloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                inputs = inputs[:, ordering].cuda()
            else:
                inputs = inputs.cuda()
            labels = labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize; the small epsilon guards the
            # logits against degenerate values
            outputs = model(inputs)
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1, 1))
            if torch.sum(torch.isnan(loss)) > 0:
                break
            loss.backward()

            # Freeze select weights by zeroing out their gradients wherever
            # a freeze mask matches the gradient's shape
            for child in model.children():
                for param in child.parameters():
                    for freeze_mask in model.freeze_mask_set:
                        if hasattr(param.grad, 'shape'):
                            if param.grad.shape == freeze_mask.shape:
                                param.grad[freeze_mask] = 0
            optimizer.step()

            # accumulate statistics
            running_loss += loss.item()
            running_acc += ((outputs > 0) == labels.float().reshape(-1, 1)).sum().item() / trainloader.batch_size
            # Generate loss and accuracy curves by saving the average of
            # every 4th minibatch
            if (i % 4) == 3:
                loss_curve.append(running_loss / 4)
                acc_curve.append(running_acc / 4)
                running_loss = 0.0
                running_acc = 0.0

        if torch.sum(torch.isnan(loss)) > 0:
            print('loss is nan, now testing')
            break

        ######################
        # validate the model #
        ######################
        # Bug fix: valid_losses is now reset every epoch. It previously
        # accumulated over ALL epochs, so the value handed to EarlyStopping
        # averaged in stale losses and distorted the stopping criterion.
        valid_losses = []
        model.eval()  # prep model for evaluation
        with torch.no_grad():  # no gradients needed for validation
            for _, data in enumerate(validloader):
                inputs, labels, _ = data
                if randorder == True:
                    # Randomize pixel order
                    inputs = inputs[:, ordering].cuda()
                else:
                    inputs = inputs.cuda()
                labels = labels.cuda()
                # forward pass: compute predicted outputs
                output = model(inputs)
                # calculate and record the validation loss
                loss = criterion(output + 1e-8, labels.float().reshape(-1, 1))
                valid_losses.append(loss.item())
        valid_loss = np.average(valid_losses)

        # early_stopping checks whether the validation loss decreased;
        # if it has, it makes a checkpoint of the current model
        early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    print('Finished Training, %d epochs' % (epoch+1))

    ######################
    # test the model     #
    ######################
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                images = images[:, ordering].cuda()
            else:
                images = images.cuda()
            labels = labels.cuda()
            # forward pass and loss on the test set
            outputs = model(images)
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1, 1))
            # Sum up correct labelings (threshold logits at 0)
            predicted = outputs > 0
            total += labels.size(0)
            correct += (predicted == labels.float().reshape(-1, 1)).sum().item()

    # Calculate test accuracy
    accuracy = correct / total
    print('Accuracy of the network on the test images: %2f %%' % (
        100 * accuracy))
    print('final outputs:', torch.unique(outputs))

    if randorder == True:
        return (loss_curve, acc_curve, loss, accuracy, model, ordering)
    else:
        return (loss_curve, acc_curve, loss, accuracy, model)
def train_test_fc(model, trainloader, validloader, testloader, epochs=10, patience=60, lr=0.001):
    '''
    Trains and tests fcnn models
    Inputs: model, trainloader, validloader, testloader, epochs, patience
    Outputs: train loss_curve, train acc_curve, test ave_loss, test accuracy, trained model
    '''
    # NOTE(review): assumes CUDA is available (everything is moved with
    # .cuda()) and that each loader yields (inputs, labels, extra) triples
    # with binary {0, 1} labels -- confirm against the dataset construction.
    t = Timer()
    t.start()
    # Initialize loss function and optimizer
    # criterion = nn.BCELoss()
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # to track the per-batch validation losses within one epoch
    valid_losses = []
    # to track the average validation loss per epoch as the model trains
    avg_valid_losses = []
    # to track training loss and accuracy as model trains
    loss_curve = []
    acc_curve = []
    # Initialize early stopping object
    early_stopping = EarlyStopping(patience=patience, verbose=False)
    for epoch in range(epochs):  # loop over the dataset multiple times
        ######################
        #  train the model   #
        ######################
        running_loss = 0.0
        running_acc = 0.0
        model.train()
        for i, data in enumerate(trainloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels, _ = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs)
            # tiny epsilon added to the logits (kept from the original; with
            # BCEWithLogitsLoss this is effectively a no-op shift)
            loss = criterion(outputs + 1e-10, labels.float().reshape(-1,1))
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            # accuracy of raw logits thresholded at 0 (sigmoid(0) == 0.5);
            # NOTE(review): dividing by trainloader.batch_size slightly
            # misstates accuracy on a short final batch -- kept as-is.
            # running_acc += (torch.round(outputs) == labels.float().reshape(-1,1)).sum().item()/trainloader.batch_size
            running_acc += ((outputs > 0) == labels.float().reshape(-1,1)).sum().item()/trainloader.batch_size
            if i % 4 == 3:    # Generate loss and accuracy curves by saving average every 4th minibatch
                loss_curve.append(running_loss/4)
                acc_curve.append(running_acc/4)
                running_loss = 0.0
                running_acc = 0.0
        ######################
        # validate the model #
        ######################
        model.eval()  # prep model for evaluation
        # BUG FIX: reset the per-batch losses each epoch. Previously the list
        # accumulated across ALL epochs, so `valid_loss` was a running global
        # average that diluted recent epochs and made early stopping sluggish.
        # (`avg_valid_losses` was initialized but never used -- evidence that
        # the per-epoch averaging step had been dropped.)
        valid_losses = []
        for _, data in enumerate(validloader):
            inputs, labels, _ = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(inputs)
            # calculate the loss
            loss = criterion(output + 1e-8, labels.float().reshape(-1,1))
            # record validation loss
            valid_losses.append(loss.item())
        valid_loss = np.average(valid_losses)
        avg_valid_losses.append(valid_loss)
        # early_stopping needs the validation loss to check if it has decreased,
        # and if it has, it will make a checkpoint of the current model
        early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    # load the last checkpoint with the best model
    # model.load_state_dict(torch.load('checkpoint.pt'))
    print('Finished Training, %d epochs' % (epoch+1))
    ######################
    #   test the model   #
    ######################
    correct = 0
    all_loss = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels, _ = data
            images = images.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            outputs = model(images)
            # calculate the loss
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            # Sum up correct labelings
            predicted = outputs > 0
            total += labels.size(0)
            correct += (predicted == labels.float().reshape(-1,1)).sum().item()
            all_loss += loss
    # Calculate test accuracy
    accuracy = correct/total
    # Calculate average loss
    ave_loss = all_loss.item()/total
    # Deliberate clamp kept from the original: runaway losses are capped at 4
    # so downstream aggregation is not dominated by divergent runs.
    if ave_loss > 1000000:
        print('ave_loss = ', ave_loss)
        ave_loss = 4
        print('ave_loss = ', ave_loss)
    # Fixed message: the test-set size is whatever `testloader` provides,
    # not a hard-coded 10000.
    print('Accuracy of the network on the test images: %4f %%' % (
        100 * accuracy))
    t.stop()
    return(loss_curve, acc_curve, ave_loss, accuracy, model)
class TimerError(Exception):
    """A custom exception used to report errors in use of Timer class"""


class Timer:
    """Minimal wall-clock stopwatch used to time training/testing runs.

    Usage: t = Timer(); t.start(); ...; elapsed = t.stop()
    """

    def __init__(self):
        # None means "not currently running".
        self._start_time = None

    def start(self):
        """Start a new timer.

        Raises:
            TimerError: if the timer is already running.
        """
        if self._start_time is not None:
            raise TimerError("Timer is running. Use .stop() to stop it")
        self._start_time = time.perf_counter()

    def stop(self):
        """Stop the timer, report the elapsed time, and return it.

        Returns:
            float: elapsed seconds since start() (also printed).

        Raises:
            TimerError: if the timer was not started.
        """
        if self._start_time is None:
            raise TimerError("Timer is not running. Use .start() to start it")
        elapsed_time = time.perf_counter() - self._start_time
        self._start_time = None
        print(f"Elapsed time: {elapsed_time:0.4f} seconds")
        # Improvement: return the measurement instead of discarding it so
        # callers can log/accumulate timings; existing callers that ignore
        # the return value are unaffected.
        return elapsed_time
def train_test_ktree_sparse(model, trainloader, validloader, testloader, epochs=10, randorder=False, patience=60,
                            lr=0.001):
    '''
    Trains and tests k-tree models
    Inputs: model, trainloader, validloader, testloader, epochs, randorder, patience
    Outputs: train loss_curve, train acc_curve, test ave_loss, test accuracy, trained model
            (plus the pixel ordering when randorder == True)
    '''
    # NOTE(review): assumes CUDA is available (tensors are moved with .cuda())
    # and that each loader yields (inputs, labels, extra) triples with binary
    # {0, 1} labels -- confirm against the dataset construction.
    t = Timer()
    t.start()
    # Initialize loss function and optimizer
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # to track training loss and accuracy as model trains
    loss_curve = []
    acc_curve = []
    # to track the validation loss as the model trains
    # NOTE(review): valid_losses is never cleared between epochs, so
    # valid_loss below is an average over ALL epochs so far, not a per-epoch
    # value -- confirm this is intended before relying on it.
    valid_losses = []
    # to track the average validation loss per epoch as the model trains
    # (initialized but unused in this function)
    avg_valid_losses = []
    # if randorder == True, generate the randomizer index array for randomizing the input image pixel order
    if randorder == True:
        ordering = torch.randperm(len(trainloader.dataset.tensors[0][0]))
    # Initialize early stopping object
    early_stopping = EarlyStopping(patience=patience, verbose=False)
    for epoch in range(epochs):  # loop over the dataset multiple times
        ######################
        #  train the model   #
        ######################
        running_loss = 0.0
        running_acc = 0.0
        model.train()
        for i, data in enumerate(trainloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                inputs = inputs[:,ordering].cuda()
            else:
                inputs = inputs.cuda()
            labels = labels.cuda()
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs)
            # small epsilon added to the logits (effectively a constant shift)
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            # bail out of the batch loop before backprop if the loss went NaN
            if torch.sum(torch.isnan(loss)) > 0:
                break
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            # accuracy of raw logits thresholded at 0 (sigmoid(0) == 0.5)
            running_acc += ((outputs > 0) == labels.float().reshape(-1,1)).sum().item()/trainloader.batch_size
            # Generate loss and accuracy curves by saving average every 4th minibatch
            if (i % 4) == 3:
                loss_curve.append(running_loss/4)
                acc_curve.append(running_acc/4)
                running_loss = 0.0
                running_acc = 0.0
        # if training produced NaNs, abandon remaining epochs and go test
        if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
            print('loss is nan, now testing')
            break
        ######################
        # validate the model #
        ######################
        model.eval()  # prep model for evaluation
        for _, data in enumerate(validloader):
            inputs, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                inputs = inputs[:,ordering].cuda()
            else:
                inputs = inputs.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(inputs)
            # calculate the loss
            loss = criterion(output + 1e-8, labels.float().reshape(-1,1))
            # record validation loss
            valid_losses.append(loss.item())
        valid_loss = np.average(valid_losses)
        # early_stopping needs the validation loss to check if it has decreased,
        # and if it has, it will make a checkpoint of the current model
        # NOTE(review): early stopping is only engaged after epoch 200,
        # presumably to let training settle first -- confirm.
        if epoch > 200:
            early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    # if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
    #     loss = 10
    #     accuracy = 0.5
    #     return(loss_curve, acc_curve, loss, accuracy, model)
    # load the last checkpoint with the best model
    # model.load_state_dict(torch.load('checkpoint.pt'))
    print('Finished Training, %d epochs' % (epoch+1))
    ######################
    #   test the model   #
    ######################
    correct = 0
    total = 0
    all_loss = 0
    with torch.no_grad():
        for data in testloader:
            images, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                images = images[:,ordering].cuda()
            else:
                images = images.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            outputs = model(images)
            # calculate the loss
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            # Sum up correct labelings
            predicted = outputs > 0
            total += labels.size(0)
            correct += (predicted == labels.float().reshape(-1,1)).sum().item()
            all_loss += loss
    # Calculate test accuracy
    accuracy = correct/total
    # Calculate average loss (per test sample, not per batch)
    ave_loss = all_loss.item()/total
    print('Accuracy of the network on the test images: %2f %%' % (
        100 * accuracy))
    # print('final outputs:', torch.unique(outputs))
    t.stop()
    # if torch.sum(torch.isnan(torch.Tensor(ave_loss))) > 0 or torch.sum(torch.isnan(outputs)) > 0:
    #     ave_loss = 10
    #     # accuracy = 0.5
    #     return(loss_curve, acc_curve, ave_loss, accuracy, model)
    # Deliberate sanitization: NaN or oversized losses are clamped to 4 so
    # downstream aggregation is not dominated by divergent runs.
    if np.sum(np.isnan(np.array(ave_loss))) > 0:
        print('nan ave_loss = ', ave_loss)
        ave_loss = 4
        print('ave_loss = ', ave_loss)
    if ave_loss > 4:
        print('big ave_loss = ', ave_loss)
        ave_loss = 4
        print('ave_loss = ', ave_loss)
    if randorder == True:
        return(loss_curve, acc_curve, ave_loss, accuracy, model, ordering)
    else:
        return(loss_curve, acc_curve, ave_loss, accuracy, model)
def train_test_ktree_sparse_debug(model, trainloader, validloader, testloader, epochs=10, randorder=False, patience=60,
                                  lr=0.001):
    """Debug stand-in for train_test_ktree_sparse.

    Performs no training at all; immediately returns placeholder results
    matching the real function's return shape: empty loss/accuracy curves,
    sentinel ave_loss 4, accuracy 1, and an empty stand-in for the model.
    All arguments are accepted (for signature compatibility) and ignored.
    """
    return [], [], 4, 1, []
def train_test_ktree_multistage(model, trainloader, validloader, testloader, epochs=10, randorder=False, patience=60,
                                lr=0.001, multistage=True, stages=[0,1,2]):
    '''
    Trains and tests k-tree models
    Inputs: model, trainloader, validloader, testloader, epochs, randorder, patience
    Outputs: train loss_curve, train acc_curve, test ave_loss, test accuracy, trained model
    '''
    # NOTE(review): mutable default `stages=[0,1,2]` is safe here only because
    # it is rebound (stages = [3]) and never mutated in place.
    # NOTE(review): assumes CUDA is available and loaders yield
    # (inputs, labels, extra) triples -- confirm against callers.
    t = Timer()
    t.start()
    # Collect parameter groups for stage-wise optimization:
    # parameters of the modules listed in model.syn_names ...
    syn_layers = []
    for syn_name in model.syn_names:
        for syn_layer in list(model._modules[syn_name].parameters()):
            syn_layers.append(syn_layer)
    # ... parameters of the modules listed per repeat in model.names ...
    den_layers = []
    for repeat in range(model.Repeats):
        for den_name in model.names[repeat]:
            for den_layer in list(model._modules[den_name].parameters()):
                den_layers.append(den_layer)
    # ... and the parameters of model.sqgl.
    sqgl_nonlin = list(model.sqgl.parameters())
    # Initialize loss function
    criterion = nn.BCEWithLogitsLoss()
    # if randorder == True, generate the randomizer index array for randomizing the input image pixel order
    if randorder == True:
        ordering = torch.randperm(len(trainloader.dataset.tensors[0][0]))
    # Single-stage mode: stage 3 optimizes all model parameters together.
    if multistage == False:
        stages = [3]
    for stage in stages:
        # Initialize loss function and optimizer: each stage trains only its
        # own parameter group (stage 3 = everything).
        if stage == 0:
            optimizer = optim.Adam(syn_layers, lr=lr)
        elif stage == 1:
            optimizer = optim.Adam(den_layers, lr=lr)
        elif stage == 2:
            optimizer = optim.Adam(sqgl_nonlin, lr=lr)
        else:
            optimizer = optim.Adam(model.parameters(), lr=lr)
        # to track training loss and accuracy as model trains
        # NOTE(review): the curves are re-initialized per stage, so the
        # returned curves cover only the LAST stage -- confirm intended.
        loss_curve = []
        acc_curve = []
        # to track the validation loss as the model trains
        # NOTE(review): never cleared between epochs, so valid_loss is an
        # average over all epochs of the stage so far.
        valid_losses = []
        # to track the average validation loss per epoch (unused here)
        avg_valid_losses = []
        # Initialize early stopping object (fresh per stage)
        early_stopping = EarlyStopping(patience=patience, verbose=False)
        for epoch in range(epochs):  # loop over the dataset multiple times
            ######################
            #  train the model   #
            ######################
            running_loss = 0.0
            running_acc = 0.0
            model.train()
            for i, data in enumerate(trainloader):
                # get the inputs; data is a list of [inputs, labels]
                inputs, labels, _ = data
                if randorder == True:
                    # Randomize pixel order
                    inputs = inputs[:,ordering].cuda()
                else:
                    inputs = inputs.cuda()
                labels = labels.cuda()
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = model(inputs)
                loss = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
                # bail out of the batch loop before backprop on a NaN loss
                if torch.sum(torch.isnan(loss)) > 0:
                    break
                loss.backward()
                optimizer.step()
                # print statistics
                running_loss += loss.item()
                # accuracy of raw logits thresholded at 0 (sigmoid(0) == 0.5)
                running_acc += ((outputs > 0) == labels.float().reshape(-1,1)).sum().item()/trainloader.batch_size
                # Generate loss and accuracy curves by saving average every 4th minibatch
                if (i % 4) == 3:
                    loss_curve.append(running_loss/4)
                    acc_curve.append(running_acc/4)
                    running_loss = 0.0
                    running_acc = 0.0
            # abandon the remaining epochs of this stage if NaNs appeared
            if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
                print('loss is nan, now testing')
                break
            ######################
            # validate the model #
            ######################
            model.eval()  # prep model for evaluation
            for _, data in enumerate(validloader):
                inputs, labels, _ = data
                if randorder == True:
                    # Randomize pixel order
                    inputs = inputs[:,ordering].cuda()
                else:
                    inputs = inputs.cuda()
                labels = labels.cuda()
                # forward pass: compute predicted outputs by passing inputs to the model
                output = model(inputs)
                # calculate the loss
                loss = criterion(output + 1e-8, labels.float().reshape(-1,1))
                # record validation loss
                valid_losses.append(loss.item())
            valid_loss = np.average(valid_losses)
            # early_stopping needs the validation loss to check if it has decreased,
            # and if it has, it will make a checkpoint of the current model
            # NOTE(review): only engaged after epoch 200 -- confirm intended.
            if epoch > 200:
                early_stopping(valid_loss, model)
            if early_stopping.early_stop:
                print("Early stopping")
                break
        # If the stage diverged, return sentinel results immediately
        # (loss=10, accuracy=0.5) instead of running the test set.
        if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
            loss = 10
            accuracy = 0.5
            return(loss_curve, acc_curve, loss, accuracy, model)
        # load the last checkpoint with the best model
        # model.load_state_dict(torch.load('checkpoint.pt'))
        print('Finished Training, %d epochs' % (epoch+1))
    ######################
    #   test the model   #
    ######################
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                images = images[:,ordering].cuda()
            else:
                images = images.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            outputs = model(images)
            # calculate the loss
            # NOTE(review): unlike the other trainers, the returned `loss` is
            # the loss of the LAST test batch only, not an average -- confirm.
            loss = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            # Sum up correct labelings
            predicted = outputs > 0
            total += labels.size(0)
            correct += (predicted == labels.float().reshape(-1,1)).sum().item()
    # Calculate test accuracy
    accuracy = correct/total
    print('Accuracy of the network on the test images: %2f %%' % (
        100 * accuracy))
    # print('final outputs:', torch.unique(outputs))
    t.stop()
    # Sanitize divergent results: NaN test loss is reported as the sentinel 10.
    if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
        loss = 10
        # accuracy = 0.5
        return(loss_curve, acc_curve, loss, accuracy, model)
    if randorder == True:
        return(loss_curve, acc_curve, loss, accuracy, model, ordering)
    else:
        return(loss_curve, acc_curve, loss, accuracy, model)
def train_test_ktree_synapse(model, trainloader, validloader, testloader, epochs=10, randorder=False, patience=60,
                             lr=0.001):
    '''
    Trains and tests k-tree models
    Inputs: model, trainloader, validloader, testloader, epochs, randorder, patience
    Outputs: train loss_curve, train acc_curve, test ave_loss, test accuracy, trained model
            (plus the pixel ordering when randorder == True)
    '''
    # NOTE(review): unlike the other trainers, `model(...)` here is expected
    # to return a (logits, model_loss) pair; the extra term is added to the
    # BCE loss. Assumes CUDA is available and loaders yield
    # (inputs, labels, extra) triples -- confirm against callers.
    t = Timer()
    t.start()
    # Initialize loss function and optimizer
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # to track training loss and accuracy as model trains
    loss_curve = []
    acc_curve = []
    # to track the validation loss as the model trains
    # NOTE(review): never cleared between epochs, so valid_loss is an average
    # over ALL epochs so far -- confirm intended.
    valid_losses = []
    # to track the average validation loss per epoch (unused here)
    avg_valid_losses = []
    # if randorder == True, generate the randomizer index array for randomizing the input image pixel order
    if randorder == True:
        ordering = torch.randperm(len(trainloader.dataset.tensors[0][0]))
    # Initialize early stopping object
    early_stopping = EarlyStopping(patience=patience, verbose=False)
    for epoch in range(epochs):  # loop over the dataset multiple times
        ######################
        #  train the model   #
        ######################
        running_loss = 0.0
        running_acc = 0.0
        model.train()
        for i, data in enumerate(trainloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                inputs = inputs[:,ordering].cuda()
            else:
                inputs = inputs.cuda()
            labels = labels.cuda()
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize; the model also returns its own
            # internal loss term, added to the prediction loss below
            outputs, loss_model = model(inputs)
            loss_pred = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            loss = loss_pred + loss_model
            # bail out of the batch loop before backprop on a NaN loss
            if torch.sum(torch.isnan(loss)) > 0:
                break
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            # accuracy of raw logits thresholded at 0 (sigmoid(0) == 0.5)
            running_acc += ((outputs > 0) == labels.float().reshape(-1,1)).sum().item()/trainloader.batch_size
            # Generate loss and accuracy curves by saving average every 4th minibatch
            if (i % 4) == 3:
                loss_curve.append(running_loss/4)
                acc_curve.append(running_acc/4)
                running_loss = 0.0
                running_acc = 0.0
        # if training produced NaNs, abandon remaining epochs and go test
        if torch.sum(torch.isnan(loss)) > 0 or torch.sum(torch.isnan(outputs)) > 0:
            print('loss is nan, now testing')
            break
        ######################
        # validate the model #
        ######################
        model.eval()  # prep model for evaluation
        for _, data in enumerate(validloader):
            inputs, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                inputs = inputs[:,ordering].cuda()
            else:
                inputs = inputs.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output, loss_model = model(inputs)
            # calculate the loss (prediction loss + model's internal loss)
            loss_pred = criterion(output + 1e-8, labels.float().reshape(-1,1))
            loss = loss_pred + loss_model
            # record validation loss
            valid_losses.append(loss.item())
        valid_loss = np.average(valid_losses)
        # early_stopping needs the validation loss to check if it has decreased,
        # and if it has, it will make a checkpoint of the current model
        # NOTE(review): only engaged after epoch 200 -- confirm intended.
        if epoch > 200:
            early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    print('Finished Training, %d epochs' % (epoch+1))
    ######################
    #   test the model   #
    ######################
    correct = 0
    total = 0
    all_loss = 0
    with torch.no_grad():
        for data in testloader:
            images, labels, _ = data
            if randorder == True:
                # Randomize pixel order
                images = images[:,ordering].cuda()
            else:
                images = images.cuda()
            labels = labels.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            outputs, loss_model = model(images)
            # calculate the loss (prediction loss + model's internal loss)
            loss_pred = criterion(outputs + 1e-8, labels.float().reshape(-1,1))
            loss = loss_pred + loss_model
            # Sum up correct labelings
            predicted = outputs > 0
            total += labels.size(0)
            correct += (predicted == labels.float().reshape(-1,1)).sum().item()
            all_loss += loss
    # Calculate test accuracy
    accuracy = correct/total
    # Calculate average loss (per test sample, not per batch)
    ave_loss = all_loss.item()/total
    print('Accuracy of the network on the test images: %2f %%' % (
        100 * accuracy))
    # print('final outputs:', torch.unique(outputs))
    t.stop()
    # Deliberate sanitization: NaN or oversized losses are clamped to 4 so
    # downstream aggregation is not dominated by divergent runs.
    if np.sum(np.isnan(np.array(ave_loss))) > 0:
        print('nan ave_loss = ', ave_loss)
        ave_loss = 4
        print('ave_loss = ', ave_loss)
    if ave_loss > 4:
        print('big ave_loss = ', ave_loss)
        ave_loss = 4
        print('ave_loss = ', ave_loss)
    if randorder == True:
        return(loss_curve, acc_curve, ave_loss, accuracy, model, ordering)
    else:
        return(loss_curve, acc_curve, ave_loss, accuracy, model)
| 36.69378
| 119
| 0.553755
| 3,433
| 30,676
| 4.850277
| 0.073405
| 0.020179
| 0.028106
| 0.029668
| 0.908954
| 0.895021
| 0.88271
| 0.877305
| 0.875503
| 0.868837
| 0
| 0.016495
| 0.335963
| 30,676
| 836
| 120
| 36.69378
| 0.800933
| 0.261377
| 0
| 0.859794
| 0
| 0
| 0.038384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018557
| false
| 0
| 0.026804
| 0
| 0.049485
| 0.063918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b344e3d7e916f327ffefd9feda6a2ac051d96066
| 22,219
|
py
|
Python
|
swagger_client/api/voice_endpoints_api.py
|
networthdata/generated-swagger-client
|
41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b
|
[
"MIT"
] | null | null | null |
swagger_client/api/voice_endpoints_api.py
|
networthdata/generated-swagger-client
|
41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b
|
[
"MIT"
] | null | null | null |
swagger_client/api/voice_endpoints_api.py
|
networthdata/generated-swagger-client
|
41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Speech Services API v2.0
Speech Services API v2.0. # noqa: E501
OpenAPI spec version: v2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class VoiceEndpointsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_voice_deployment(self, endpoint, **kwargs): # noqa: E501
"""Creates a new voice endpoint object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_voice_deployment(endpoint, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EndpointDefinition endpoint: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_voice_deployment_with_http_info(endpoint, **kwargs) # noqa: E501
else:
(data) = self.create_voice_deployment_with_http_info(endpoint, **kwargs) # noqa: E501
return data
def create_voice_deployment_with_http_info(self, endpoint, **kwargs): # noqa: E501
"""Creates a new voice endpoint object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_voice_deployment_with_http_info(endpoint, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EndpointDefinition endpoint: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['endpoint'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_voice_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'endpoint' is set
if ('endpoint' not in params or
params['endpoint'] is None):
raise ValueError("Missing the required parameter `endpoint` when calling `create_voice_deployment`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'endpoint' in params:
body_params = params['endpoint']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_deployment(self, id, **kwargs): # noqa: E501
"""Delete the specified voice endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_deployment(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The id of voice endpoint. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_deployment_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_deployment_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_deployment_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete the specified voice endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_deployment_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The id of voice endpoint. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_deployment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_supported_locales_for_voice_endpoints(self, **kwargs): # noqa: E501
"""Gets a list of supported locales for custom voice endpoints. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_supported_locales_for_voice_endpoints(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_supported_locales_for_voice_endpoints_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_supported_locales_for_voice_endpoints_with_http_info(**kwargs) # noqa: E501
return data
def get_supported_locales_for_voice_endpoints_with_http_info(self, **kwargs): # noqa: E501
"""Gets a list of supported locales for custom voice endpoints. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_supported_locales_for_voice_endpoints_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_supported_locales_for_voice_endpoints" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints/locales', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_voice_deployment(self, id, **kwargs): # noqa: E501
"""Gets the details of a custom voice endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_voice_deployment(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: Endpoint
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_voice_deployment_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_voice_deployment_with_http_info(id, **kwargs) # noqa: E501
return data
def get_voice_deployment_with_http_info(self, id, **kwargs): # noqa: E501
"""Gets the details of a custom voice endpoint. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_voice_deployment_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: Endpoint
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_voice_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_voice_deployment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Endpoint', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_voice_deployments(self, **kwargs): # noqa: E501
"""Gets a list of voice endpoint details. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_voice_deployments(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Endpoint]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_voice_deployments_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_voice_deployments_with_http_info(**kwargs) # noqa: E501
return data
def get_voice_deployments_with_http_info(self, **kwargs): # noqa: E501
"""Gets a list of voice endpoint details. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_voice_deployments_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Endpoint]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_voice_deployments" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Endpoint]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_voice_endpoint(self, id, endpoint_update, **kwargs): # noqa: E501
"""Updates the name and description of the endpoint identified by the given ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_voice_endpoint(id, endpoint_update, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the endpoint. (required)
:param EndpointMetadataUpdate endpoint_update: The updated values for the endpoint. (required)
:return: Endpoint
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_voice_endpoint_with_http_info(id, endpoint_update, **kwargs) # noqa: E501
else:
(data) = self.update_voice_endpoint_with_http_info(id, endpoint_update, **kwargs) # noqa: E501
return data
def update_voice_endpoint_with_http_info(self, id, endpoint_update, **kwargs): # noqa: E501
"""Updates the name and description of the endpoint identified by the given ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_voice_endpoint_with_http_info(id, endpoint_update, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the endpoint. (required)
:param EndpointMetadataUpdate endpoint_update: The updated values for the endpoint. (required)
:return: Endpoint
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'endpoint_update'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_voice_endpoint" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_voice_endpoint`") # noqa: E501
# verify the required parameter 'endpoint_update' is set
if ('endpoint_update' not in params or
params['endpoint_update'] is None):
raise ValueError("Missing the required parameter `endpoint_update` when calling `update_voice_endpoint`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'endpoint_update' in params:
body_params = params['endpoint_update']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['subscription_key', 'token'] # noqa: E501
return self.api_client.call_api(
'/api/texttospeech/v2.0/endpoints/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Endpoint', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 37.981197
| 131
| 0.609253
| 2,540
| 22,219
| 5.071654
| 0.06811
| 0.044093
| 0.026083
| 0.033535
| 0.952492
| 0.944729
| 0.922372
| 0.908555
| 0.905372
| 0.889769
| 0
| 0.015261
| 0.301049
| 22,219
| 584
| 132
| 38.046233
| 0.814231
| 0.315901
| 0
| 0.78481
| 1
| 0
| 0.180073
| 0.057606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041139
| false
| 0
| 0.012658
| 0
| 0.113924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b353fbc71b9583a9dc441db20de7d702c692ef36
| 3,217
|
py
|
Python
|
lib_pypy/_pypy_winbase_cffi.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib_pypy/_pypy_winbase_cffi.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib_pypy/_pypy_winbase_cffi.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# auto-generated file
import _cffi_backend

# CFFI binding for a subset of the Windows API (process, pipe, console, and
# osfhandle helpers — see the names in _globals below).  The _types,
# _globals, _struct_unions, and _typenames values are cffi's packed binary
# descriptors: do not edit them by hand; regenerate this module with the
# cffi build script instead.
ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
    _version = 0x2601,
    _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
    _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
    _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
    _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
)
| 292.454545
| 1,362
| 0.746348
| 669
| 3,217
| 3.541106
| 0.171898
| 0.405234
| 0.156606
| 0.074293
| 0.442803
| 0.417476
| 0.348248
| 0.300127
| 0.300127
| 0.300127
| 0
| 0.339026
| 0.017097
| 3,217
| 10
| 1,363
| 321.7
| 0.410183
| 0.005906
| 0
| 0
| 1
| 0.125
| 0.870057
| 0.84683
| 0
| 1
| 0.001883
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2fa396459e724642621cc4ff04a6aa8d8ba9fa64
| 85
|
py
|
Python
|
emmet-api/emmet/api/core/__init__.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-api/emmet/api/core/__init__.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-api/emmet/api/core/__init__.py
|
acrutt/emmet
|
e98100c9932f145a3ad3087ddb7aa9b779d9a191
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from emmet.api.core.api import MAPI
from emmet.api.core.settings import MAPISettings
| 28.333333
| 48
| 0.835294
| 14
| 85
| 5.071429
| 0.571429
| 0.253521
| 0.338028
| 0.450704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 2
| 49
| 42.5
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2fce595e80ba3933ef68fcf5dc4f3e9a4685ed27
| 84
|
py
|
Python
|
evkit/sensors/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 120
|
2019-04-22T04:45:28.000Z
|
2022-03-23T01:53:17.000Z
|
evkit/sensors/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 14
|
2019-06-12T08:21:21.000Z
|
2021-08-25T15:36:58.000Z
|
evkit/sensors/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 19
|
2019-06-19T07:00:36.000Z
|
2022-03-24T07:18:30.000Z
|
from .sensorpack import SensorPack as SensorDict
from .sensorpack import SensorPack
| 28
| 48
| 0.857143
| 10
| 84
| 7.2
| 0.5
| 0.388889
| 0.555556
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 84
| 2
| 49
| 42
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2fedd7e44704c7122d83f162db7ac464b6c70102
| 702
|
py
|
Python
|
Packages/Patterns_Package/symbols/line_symbols/Left_faced_equilataral_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
Packages/Patterns_Package/symbols/line_symbols/Left_faced_equilataral_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
Packages/Patterns_Package/symbols/line_symbols/Left_faced_equilataral_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
def for_Left_faced_equilataral_angle_Triangle():
    """ pattern for : Left_faced_equilataral_angle_Triangle using for loop"""
    # 7 rows x 4 columns; a cell is starred when it lies on the right edge
    # (col == 3) or on one of the two diagonals meeting it.
    for row in range(7):
        for col in range(4):
            mark = '*' if col == 3 or row + col == 3 or row - col == 3 else ' '
            print(mark, end=' ')
        print()
def while_Left_faced_equilataral_angle_Triangle():
    """ pattern for : Left_faced_equilataral_angle_Triangle using while loop"""
    # Same 7x4 grid as the for-loop variant, expressed with explicit
    # while-loop counters.
    row = 0
    while row < 7:
        col = 0
        while col < 4:
            mark = '*' if col == 3 or row + col == 3 or row - col == 3 else ' '
            print(mark, end=' ')
            col += 1
        print()
        row += 1
| 31.909091
| 80
| 0.475783
| 92
| 702
| 3.434783
| 0.271739
| 0.037975
| 0.253165
| 0.316456
| 0.743671
| 0.743671
| 0.734177
| 0.734177
| 0.734177
| 0.734177
| 0
| 0.03271
| 0.390313
| 702
| 22
| 81
| 31.909091
| 0.705607
| 0.192308
| 0
| 0.5
| 0
| 0
| 0.014981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.3
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffa4d3dc99f3c109cf97474ea0ec14440aa63c2c
| 76
|
py
|
Python
|
torchreid/utils/__init__.py
|
Bhaskers-Blu-Org2/Semantics-Aligned-Representation-Learning-for-Person-Re-identification
|
e53715dd40be81b2215f4a530bde0c76bf1f378d
|
[
"MIT"
] | 25
|
2020-03-17T10:21:05.000Z
|
2022-03-16T20:05:41.000Z
|
torchreid/utils/__init__.py
|
wencoast/Semantics-Aligned-Representation-Learning-for-Person-Re-identification
|
e53715dd40be81b2215f4a530bde0c76bf1f378d
|
[
"MIT"
] | 5
|
2020-03-29T18:05:49.000Z
|
2020-11-15T17:03:20.000Z
|
torchreid/utils/__init__.py
|
wencoast/Semantics-Aligned-Representation-Learning-for-Person-Re-identification
|
e53715dd40be81b2215f4a530bde0c76bf1f378d
|
[
"MIT"
] | 12
|
2020-03-17T06:27:29.000Z
|
2021-09-13T12:48:12.000Z
|
from __future__ import absolute_import
from __future__ import print_function
| 38
| 38
| 0.907895
| 10
| 76
| 5.9
| 0.6
| 0.338983
| 0.542373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092105
| 76
| 2
| 39
| 38
| 0.855072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
925a39819c88bcb51ccc8b0a8c8496c4d9809ec1
| 26,448
|
py
|
Python
|
adaptiveleak/unit_tests/utils/message.py
|
tejaskannan/adaptive-sensor-security
|
4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b
|
[
"Apache-2.0"
] | null | null | null |
adaptiveleak/unit_tests/utils/message.py
|
tejaskannan/adaptive-sensor-security
|
4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b
|
[
"Apache-2.0"
] | null | null | null |
adaptiveleak/unit_tests/utils/message.py
|
tejaskannan/adaptive-sensor-security
|
4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
import h5py
from sklearn.metrics import mean_absolute_error
from adaptiveleak.utils import message
from adaptiveleak.utils.constants import SMALL_NUMBER
from adaptiveleak.utils.data_utils import pad_to_length, create_groups, select_range_shifts_array
from adaptiveleak.utils.shifting import merge_shift_groups
class TestByte(unittest.TestCase):
    """Round-trip tests for the standard fixed-width measurement codec.

    Each test encodes a small measurement matrix with
    message.encode_standard_measurements and checks that decoding recovers
    the values (exactly when the chosen precision can represent them, or the
    rounded values otherwise) along with the collected sequence indices.
    """

    def test_encode_decode_six(self):
        """6 fractional bits, 8-bit width: every value is exactly representable."""
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        precision = 6
        width = 8
        seq_length = 8
        collected_indices = [0, 3]

        encoded = message.encode_standard_measurements(measurements=measurements,
                                                       precision=precision,
                                                       width=width,
                                                       collected_indices=collected_indices,
                                                       seq_length=seq_length,
                                                       should_compress=False)

        recovered, indices, _ = message.decode_standard_measurements(byte_str=encoded,
                                                                     num_features=measurements.shape[1],
                                                                     seq_length=seq_length,
                                                                     width=width,
                                                                     precision=precision,
                                                                     should_compress=False)

        # Check recovered values
        self.assertTrue(np.all(np.isclose(measurements, recovered)))

        # Check indices
        self.assertEqual(len(indices), 2)
        self.assertEqual(indices[0], collected_indices[0])
        self.assertEqual(indices[1], collected_indices[1])

    def test_encode_decode_two(self):
        """2 fractional bits: values round to the nearest multiple of 0.25."""
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        precision = 2
        width = 4
        seq_length = 8
        collected_indices = [0, 4]

        encoded = message.encode_standard_measurements(measurements=measurements,
                                                       precision=precision,
                                                       width=width,
                                                       collected_indices=collected_indices,
                                                       seq_length=seq_length,
                                                       should_compress=False)

        recovered, indices, _ = message.decode_standard_measurements(byte_str=encoded,
                                                                     num_features=measurements.shape[1],
                                                                     seq_length=seq_length,
                                                                     width=width,
                                                                     precision=precision,
                                                                     should_compress=False)

        # Quantized expectation: -0.125 and 0.625 are not representable with
        # 2 fractional bits, so the decoder yields the rounded values.
        expected = np.array([[0.25, 0.0, 0.75], [0.0, 0.5, -0.5]])

        # Check recovered values
        self.assertTrue(np.all(np.isclose(expected, recovered)))

        # Check indices
        self.assertEqual(len(indices), 2)
        self.assertEqual(indices[0], collected_indices[0])
        self.assertEqual(indices[1], collected_indices[1])

    def test_encode_decode_ten(self):
        """10 fractional bits: 1/512 (2^-9) is exactly representable."""
        measurements = np.array([[0.25, -0.125, (1.0 / 512.0)], [-0.125, 0.625, -0.5]])
        precision = 10
        width = 13
        seq_length = 8
        collected_indices = [0, 6]

        encoded = message.encode_standard_measurements(measurements=measurements,
                                                       precision=precision,
                                                       width=width,
                                                       collected_indices=collected_indices,
                                                       seq_length=seq_length,
                                                       should_compress=False)

        recovered, indices, _ = message.decode_standard_measurements(byte_str=encoded,
                                                                     num_features=measurements.shape[1],
                                                                     seq_length=seq_length,
                                                                     width=width,
                                                                     precision=precision,
                                                                     should_compress=False)

        # Check recovered values
        self.assertTrue(np.all(np.isclose(measurements, recovered)))

        # Check indices
        self.assertEqual(len(indices), 2)
        self.assertEqual(indices[0], collected_indices[0])
        self.assertEqual(indices[1], collected_indices[1])

    def test_encode_decode_two_compressed(self):
        """Same as test_encode_decode_two but with should_compress=True."""
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        precision = 2
        width = 4
        seq_length = 8
        collected_indices = [0, 4]

        encoded = message.encode_standard_measurements(measurements=measurements,
                                                       precision=precision,
                                                       width=width,
                                                       collected_indices=collected_indices,
                                                       seq_length=seq_length,
                                                       should_compress=True)

        recovered, indices, _ = message.decode_standard_measurements(byte_str=encoded,
                                                                     num_features=measurements.shape[1],
                                                                     seq_length=seq_length,
                                                                     width=width,
                                                                     precision=precision,
                                                                     should_compress=True)

        expected = np.array([[0.25, 0.0, 0.75], [0.0, 0.5, -0.5]])

        # Check recovered values
        self.assertTrue(np.all(np.isclose(expected, recovered)))

        # Check indices
        self.assertEqual(len(indices), 2)
        self.assertEqual(indices[0], collected_indices[0])
        self.assertEqual(indices[1], collected_indices[1])

    def test_encode_decode_six_compressed(self):
        """Compressed round trip with values above 1.0 (4 fractional bits)."""
        measurements = np.array([[1.25, -0.125, -0.75], [1.125, -0.625, -0.5]])
        precision = 4
        width = 6
        seq_length = 8
        collected_indices = [0, 4]

        encoded = message.encode_standard_measurements(measurements=measurements,
                                                       precision=precision,
                                                       width=width,
                                                       collected_indices=collected_indices,
                                                       seq_length=seq_length,
                                                       should_compress=True)

        recovered, indices, _ = message.decode_standard_measurements(byte_str=encoded,
                                                                     num_features=measurements.shape[1],
                                                                     seq_length=seq_length,
                                                                     width=width,
                                                                     precision=precision,
                                                                     should_compress=True)

        # Check recovered values
        self.assertTrue(np.all(np.isclose(measurements, recovered)))

        # Check indices
        self.assertEqual(len(indices), 2)
        self.assertEqual(indices[0], collected_indices[0])
        self.assertEqual(indices[1], collected_indices[1])
class TestGroupWidths(unittest.TestCase):
    """Round-trip test for the shift/width/repetition group header codec."""

    def test_encode_decode_widths(self):
        group_widths = [16, 5, 9, 12]
        group_shifts = [7, 0, 4, 6]
        group_reps = [10, 4, 3, 6]

        # Pack the group metadata, then unpack it and compare field by field.
        packed = message.encode_shifts(widths=group_widths, shifts=group_shifts, reps=group_reps, num_shift_bits=4, min_width=5)
        decoded_shifts, decoded_widths, decoded_reps, num_bytes = message.decode_shifts(encoded=packed, num_shift_bits=4, min_width=5)

        self.assertEqual(decoded_widths, group_widths)
        self.assertEqual(decoded_shifts, group_shifts)
        self.assertEqual(decoded_reps, group_reps)
class TestStable(unittest.TestCase):
    """Round-trip tests for the group-based 'stable' measurement codec.

    The stable codec packs measurements in groups, each with its own bit
    width and range shift.  These tests encode with explicit widths, shifts,
    and group sizes, decode, and compare via mean absolute error against
    either SMALL_NUMBER (lossless cases) or a looser bound (lossy cases).
    """

    def test_encode_decode_two_groups(self):
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        non_fractional = 2
        seq_length = 8
        collected_indices = [0, 1]
        widths = [5, 5]
        shifts = [-2, -1]
        sizes = [3, 3]

        encoded = message.encode_stable_measurements(measurements=measurements,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=widths,
                                                     shifts=shifts,
                                                     group_sizes=sizes,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=measurements.shape[1],
                                                                      non_fractional=non_fractional)

        # Check recovered values
        error = mean_absolute_error(y_true=measurements, y_pred=decoded)
        self.assertLess(error, SMALL_NUMBER)

        # Check the returned width
        self.assertEqual(widths, [5, 5])

        # Check indices
        self.assertEqual(indices, collected_indices)

    def test_encode_decode_two_groups_truncated(self):
        # Lossy case: the shift of -1 cannot represent every value exactly,
        # so the error bound is loosened to 0.03.
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        non_fractional = 2
        seq_length = 8
        collected_indices = [0, 5]
        widths = [5, 5]
        shifts = [-1, -1]
        sizes = [3, 3]

        encoded = message.encode_stable_measurements(measurements=measurements,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=widths,
                                                     shifts=shifts,
                                                     group_sizes=sizes,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=measurements.shape[1],
                                                                      non_fractional=non_fractional)

        # Check recovered values
        error = mean_absolute_error(y_true=measurements, y_pred=decoded)
        self.assertLess(error, 0.03)

        # Check the widths
        self.assertEqual(widths, [5, 5])

        # Check indices
        self.assertEqual(indices, collected_indices)

    def test_encode_decode_two_groups_truncated_signed(self):
        # Mostly-negative values with uneven group sizes (2 and 4).
        measurements = np.array([[0.25, -0.125, -0.75], [0.125, -0.625, -0.5]])
        non_fractional = 2
        seq_length = 8
        collected_indices = [0, 5]
        widths = [5, 5]
        shifts = [-1, -1]
        sizes = [2, 4]

        encoded = message.encode_stable_measurements(measurements=measurements,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=widths,
                                                     shifts=shifts,
                                                     group_sizes=sizes,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=measurements.shape[1],
                                                                      non_fractional=non_fractional)

        # Check recovered values
        error = mean_absolute_error(y_true=measurements, y_pred=decoded)
        self.assertLess(error, 0.002)

        # Check the width
        self.assertEqual(widths, [5, 5])

        # Check indices
        self.assertEqual(indices, collected_indices)

    def test_encode_decode_three_groups(self):
        # Three groups with heterogeneous widths, shifts, and sizes.
        measurements = np.array([[0.25, -0.125, 0.75], [-0.25, 0.625, -0.5]])
        non_fractional = 4
        seq_length = 8
        collected_indices = [0, 7]
        widths = [5, 6, 5]
        shifts = [-1, -2, 0]
        sizes = [2, 3, 1]

        encoded = message.encode_stable_measurements(measurements=measurements,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=widths,
                                                     shifts=shifts,
                                                     group_sizes=sizes,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=measurements.shape[1],
                                                                      non_fractional=non_fractional)

        # Check recovered values
        error = mean_absolute_error(y_true=measurements, y_pred=decoded)
        self.assertLess(error, SMALL_NUMBER)

        # Check widths
        self.assertEqual(widths, [5, 6, 5])

        # Check indices
        self.assertEqual(indices, collected_indices)

    def test_encode_decode_small_padded(self):
        # Same as the two-group case, but the encoded message is padded with
        # 7 extra bytes; decoding must ignore the padding.
        measurements = np.array([[0.25, -0.125, 0.75], [-0.125, 0.625, -0.5]])
        non_fractional = 2
        seq_length = 8
        collected_indices = [0, 1]
        widths = [5, 5]
        shifts = [-2, -1]
        sizes = [3, 3]

        encoded = message.encode_stable_measurements(measurements=measurements,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=widths,
                                                     shifts=shifts,
                                                     group_sizes=sizes,
                                                     non_fractional=non_fractional)

        encoded = pad_to_length(encoded, length=len(encoded) + 7)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=measurements.shape[1],
                                                                      non_fractional=non_fractional)

        # Check recovered values
        error = mean_absolute_error(y_true=measurements, y_pred=decoded)
        self.assertLess(error, SMALL_NUMBER)

        # Check the returned width
        self.assertEqual(widths, [5, 5])

        # Check indices
        self.assertEqual(indices, collected_indices)

    def test_encode_decode_large(self):
        # NOTE(review): this and the following tests depend on the HDF5
        # dataset existing at ../../datasets/uci_har/train/data.h5 relative
        # to the working directory — confirm before running in CI.
        # Load the data
        with h5py.File('../../datasets/uci_har/train/data.h5', 'r') as fin:
            inputs = fin['inputs'][0]  # [50, 6]

        width = 8
        seq_length = inputs.shape[0]
        collected_indices = list(range(seq_length))
        non_fractional = 3

        flattened = inputs.T.reshape(-1)

        # Set the shifts
        shifts = select_range_shifts_array(measurements=flattened,
                                           old_width=16,
                                           old_precision=13,
                                           new_width=width,
                                           num_range_bits=3)

        merged_shifts, sizes = merge_shift_groups(values=flattened,
                                                  shifts=shifts,
                                                  max_num_groups=6)

        # Set the widths using the number of groups
        group_widths = [width for _ in sizes]

        # Encode and Decode the message
        encoded = message.encode_stable_measurements(measurements=inputs,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=group_widths,
                                                     group_sizes=sizes,
                                                     shifts=merged_shifts,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=inputs.shape[1],
                                                                      non_fractional=non_fractional)

        error = mean_absolute_error(y_true=inputs, y_pred=decoded)
        self.assertLessEqual(error, 0.01)

        self.assertEqual(widths, group_widths)

    def test_encode_decode_large_two(self):
        # Same pipeline on a different sample (index 495).
        # Load the data
        with h5py.File('../../datasets/uci_har/train/data.h5', 'r') as fin:
            inputs = fin['inputs'][495]  # [50, 6]

        width = 8
        seq_length = inputs.shape[0]
        collected_indices = list(range(seq_length))
        non_fractional = 3

        flattened = inputs.T.reshape(-1)

        # Set the shifts
        shifts = select_range_shifts_array(measurements=flattened,
                                           old_width=16,
                                           old_precision=13,
                                           new_width=width,
                                           num_range_bits=3)

        merged_shifts, sizes = merge_shift_groups(values=flattened,
                                                  shifts=shifts,
                                                  max_num_groups=6)

        # Set the widths using the number of groups
        group_widths = [width for _ in sizes]

        # Encode and Decode the message
        encoded = message.encode_stable_measurements(measurements=inputs,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=group_widths,
                                                     group_sizes=sizes,
                                                     shifts=merged_shifts,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=inputs.shape[1],
                                                                      non_fractional=non_fractional)

        error = mean_absolute_error(y_true=inputs, y_pred=decoded)
        self.assertLessEqual(error, 0.01)

        self.assertEqual(widths, group_widths)

    def test_encode_decode_large_tight(self):
        # Narrower 5-bit width: the accepted error bound rises to 0.062.
        # Load the data
        with h5py.File('../../datasets/uci_har/train/data.h5', 'r') as fin:
            inputs = fin['inputs'][495]  # [50, 6]

        width = 5
        seq_length = inputs.shape[0]
        collected_indices = list(range(seq_length))
        non_fractional = 3

        flattened = inputs.T.reshape(-1)

        # Set the shifts
        shifts = select_range_shifts_array(measurements=flattened,
                                           old_width=16,
                                           old_precision=13,
                                           new_width=width,
                                           num_range_bits=3)

        merged_shifts, sizes = merge_shift_groups(values=flattened,
                                                  shifts=shifts,
                                                  max_num_groups=6)

        # Set the widths using the number of groups
        group_widths = [width for _ in sizes]

        # Encode and Decode the message
        encoded = message.encode_stable_measurements(measurements=inputs,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=group_widths,
                                                     group_sizes=sizes,
                                                     shifts=merged_shifts,
                                                     non_fractional=non_fractional)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=inputs.shape[1],
                                                                      non_fractional=non_fractional)

        error = mean_absolute_error(y_true=inputs, y_pred=decoded)
        self.assertLessEqual(error, 0.062)

        self.assertEqual(widths, group_widths)

    def test_encode_decode_large_padded(self):
        # Padding the encoded message with 12 extra bytes must not affect
        # the decoded result.
        # Load the data
        with h5py.File('../../datasets/uci_har/train/data.h5', 'r') as fin:
            inputs = fin['inputs'][495]  # [50, 6]

        width = 8
        seq_length = inputs.shape[0]
        collected_indices = list(range(seq_length))
        non_fractional = 3

        flattened = inputs.T.reshape(-1)

        # Set the shifts
        shifts = select_range_shifts_array(measurements=flattened,
                                           old_width=16,
                                           old_precision=13,
                                           new_width=width,
                                           num_range_bits=3)

        merged_shifts, sizes = merge_shift_groups(values=flattened,
                                                  shifts=shifts,
                                                  max_num_groups=6)

        # Set the widths using the number of groups
        group_widths = [width for _ in sizes]

        # Encode and Decode the message
        encoded = message.encode_stable_measurements(measurements=inputs,
                                                     collected_indices=collected_indices,
                                                     seq_length=seq_length,
                                                     widths=group_widths,
                                                     group_sizes=sizes,
                                                     shifts=merged_shifts,
                                                     non_fractional=non_fractional)

        encoded = pad_to_length(encoded, length=len(encoded) + 12)

        decoded, indices, widths = message.decode_stable_measurements(encoded=encoded,
                                                                      seq_length=seq_length,
                                                                      num_features=inputs.shape[1],
                                                                      non_fractional=non_fractional)

        error = mean_absolute_error(y_true=inputs, y_pred=decoded)
        self.assertLessEqual(error, 0.01)

        self.assertEqual(widths, group_widths)
class TestDeltaEncode(unittest.TestCase):
    """Round-trip tests for delta encoding of measurement matrices."""

    def test_encode(self):
        raw = np.array([[10.0, 10.0], [12.0, 12.0], [12.5, 11.5]])
        deltas = message.delta_encode(raw)
        target = np.array([[10.0, 10.0], [2.0, 2.0], [0.5, -0.5]])
        self.assertTrue(np.all(np.isclose(deltas, target)))

    def test_decode(self):
        deltas = np.array([[10.0, 10.0], [2.0, 2.0], [0.5, -0.5]])
        restored = message.delta_decode(deltas)
        target = np.array([[10.0, 10.0], [12.0, 12.0], [12.5, 11.5]])
        self.assertTrue(np.all(np.isclose(restored, target)))

    def test_single_feature(self):
        # Fixed seed keeps the random round trip deterministic.
        rng = np.random.RandomState(seed=3489)
        raw = rng.uniform(low=-2.0, high=2.0, size=(7, 1))
        restored = message.delta_decode(message.delta_encode(raw))
        self.assertTrue(np.all(np.isclose(restored, raw)))

    def test_many_features(self):
        rng = np.random.RandomState(seed=3489)
        raw = rng.uniform(low=-2.0, high=2.0, size=(12, 5))
        restored = message.delta_decode(message.delta_encode(raw))
        self.assertTrue(np.all(np.isclose(restored, raw)))
# Run the full test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 46.4
| 123
| 0.457123
| 2,256
| 26,448
| 5.135195
| 0.072252
| 0.060596
| 0.029003
| 0.043505
| 0.923263
| 0.905654
| 0.888476
| 0.87363
| 0.87104
| 0.863444
| 0
| 0.039834
| 0.470357
| 26,448
| 569
| 124
| 46.481547
| 0.787193
| 0.034067
| 0
| 0.80798
| 0
| 0
| 0.007059
| 0.005647
| 0
| 0
| 0
| 0
| 0.124688
| 1
| 0.047382
| false
| 0
| 0.01995
| 0
| 0.077307
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
928896db4fa020128dd99e341598b1422f0d2587
| 127
|
py
|
Python
|
testprojectD-rice-d058558a4d4f/rice/api/__init__.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
testprojectD-rice-d058558a4d4f/rice/api/__init__.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
testprojectD-rice-d058558a4d4f/rice/api/__init__.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
from flask import Blueprint

# Blueprint grouping the API routes; registered with the app elsewhere.
api = Blueprint('api', __name__)

# Imported after `api` is created so the view modules can attach their routes
# to it; presumably placed here (bottom of the module) to avoid a circular
# import — verify against the view modules before moving this line.
from . import rice, order, login, change_password, change_phone
| 21.166667
| 63
| 0.771654
| 17
| 127
| 5.411765
| 0.705882
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141732
| 127
| 5
| 64
| 25.4
| 0.844037
| 0
| 0
| 0
| 0
| 0
| 0.023622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
|
0
| 7
|
2b96ced1b97eaf0217dd776e5ad366bc76bab8f0
| 15,667
|
py
|
Python
|
omega_miya/database/model/cooldown.py
|
rinrini001/omega-miya
|
53a6683fccb0618e306abe9e103cec78445f3796
|
[
"MIT"
] | 120
|
2021-04-20T13:20:46.000Z
|
2022-03-26T05:43:21.000Z
|
omega_miya/database/model/cooldown.py
|
rinrini001/omega-miya
|
53a6683fccb0618e306abe9e103cec78445f3796
|
[
"MIT"
] | 57
|
2021-04-20T08:10:14.000Z
|
2022-03-28T01:55:14.000Z
|
omega_miya/database/model/cooldown.py
|
rinrini001/omega-miya
|
53a6683fccb0618e306abe9e103cec78445f3796
|
[
"MIT"
] | 32
|
2021-04-21T01:57:17.000Z
|
2022-03-01T18:06:34.000Z
|
from omega_miya.database.database import BaseDB
from omega_miya.database.class_result import Result
from omega_miya.database.tables import CoolDownEvent
from datetime import datetime
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class DBCoolDownEvent(object):
    """Async helpers for CoolDownEvent rows: upsert, check and clear cool-downs.

    Four scopes are supported, selected by ``event_type``:
    global-per-group, global-per-user, plugin-per-group and plugin-per-user.
    All methods report outcomes via Result objects instead of raising.
    """
    global_group_type: str = 'global_group'
    global_user_type: str = 'global_user'
    group_type: str = 'group'
    user_type: str = 'user'

    @classmethod
    def _select_event(cls, event_type: str, **identifiers):
        # Shared SELECT for a single cool-down event: filter on event_type
        # plus any identifying columns (plugin / group_id / user_id).
        query = select(CoolDownEvent).where(CoolDownEvent.event_type == event_type)
        for column, value in identifiers.items():
            query = query.where(getattr(CoolDownEvent, column) == value)
        return query

    @classmethod
    async def _add_event(
            cls, event_type: str, stop_at: datetime, description: str, **identifiers) -> Result.IntResult:
        """Upsert one cool-down event identified by event_type + identifiers.

        :return:
            result = 0: Success
            result = -1: Error
        """
        async_session = BaseDB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(
                            cls._select_event(event_type, **identifiers))
                        # Row already present: refresh expiry / description in place.
                        exist_event = session_result.scalar_one()
                        exist_event.stop_at = stop_at
                        exist_event.description = description
                        exist_event.updated_at = datetime.now()
                        result = Result.IntResult(error=False, info='Success upgraded', result=0)
                    except NoResultFound:
                        # No row yet: create a fresh event for this scope.
                        new_event = CoolDownEvent(
                            event_type=event_type, stop_at=stop_at,
                            description=description, created_at=datetime.now(), **identifiers)
                        session.add(new_event)
                        result = Result.IntResult(error=False, info='Success added', result=0)
                await session.commit()
            except MultipleResultsFound:
                await session.rollback()
                result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
            except Exception as e:
                await session.rollback()
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    @classmethod
    async def _check_event(cls, event_type: str, **identifiers) -> Result.IntResult:
        """Look up one cool-down event and classify its state.

        :return:
            result = 2: Success with CoolDown Event expired
            result = 1: Success with CoolDown Event exist
            result = 0: Success with CoolDown Event not found
            result = -1: Error
        """
        async_session = BaseDB().get_async_session()
        async with async_session() as session:
            async with session.begin():
                try:
                    session_result = await session.execute(
                        cls._select_event(event_type, **identifiers))
                    event = session_result.scalar_one()
                    stop_at = event.stop_at
                    if datetime.now() > stop_at:
                        result = Result.IntResult(error=False, info='Success, CoolDown expired', result=2)
                    else:
                        result = Result.IntResult(error=False, info=f'CoolDown until: {stop_at}', result=1)
                except NoResultFound:
                    result = Result.IntResult(error=False, info='NoResultFound', result=0)
                except MultipleResultsFound:
                    result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
                except Exception as e:
                    result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    # ---- public wrappers; original signatures preserved --------------------

    @classmethod
    async def add_global_group_cool_down_event(
            cls, group_id: int, stop_at: datetime, description: str = None) -> Result.IntResult:
        """
        :return:
            result = 0: Success
            result = -1: Error
        """
        return await cls._add_event(cls.global_group_type, stop_at, description, group_id=group_id)

    @classmethod
    async def check_global_group_cool_down_event(cls, group_id: int) -> Result.IntResult:
        """
        :return:
            result = 2: Success with CoolDown Event expired
            result = 1: Success with CoolDown Event exist
            result = 0: Success with CoolDown Event not found
            result = -1: Error
        """
        return await cls._check_event(cls.global_group_type, group_id=group_id)

    @classmethod
    async def add_global_user_cool_down_event(
            cls, user_id: int, stop_at: datetime, description: str = None) -> Result.IntResult:
        """
        :return:
            result = 0: Success
            result = -1: Error
        """
        return await cls._add_event(cls.global_user_type, stop_at, description, user_id=user_id)

    @classmethod
    async def check_global_user_cool_down_event(cls, user_id: int) -> Result.IntResult:
        """
        :return:
            result = 2: Success with CoolDown Event expired
            result = 1: Success with CoolDown Event exist
            result = 0: Success with CoolDown Event not found
            result = -1: Error
        """
        return await cls._check_event(cls.global_user_type, user_id=user_id)

    @classmethod
    async def add_group_cool_down_event(
            cls, plugin: str, group_id: int, stop_at: datetime, description: str = None) -> Result.IntResult:
        """
        :return:
            result = 0: Success
            result = -1: Error
        """
        return await cls._add_event(cls.group_type, stop_at, description,
                                    plugin=plugin, group_id=group_id)

    @classmethod
    async def check_group_cool_down_event(cls, plugin: str, group_id: int) -> Result.IntResult:
        """
        :return:
            result = 2: Success with CoolDown Event expired
            result = 1: Success with CoolDown Event exist
            result = 0: Success with CoolDown Event not found
            result = -1: Error
        """
        return await cls._check_event(cls.group_type, plugin=plugin, group_id=group_id)

    @classmethod
    async def add_user_cool_down_event(
            cls, plugin: str, user_id: int, stop_at: datetime, description: str = None) -> Result.IntResult:
        """
        :return:
            result = 0: Success
            result = -1: Error
        """
        return await cls._add_event(cls.user_type, stop_at, description,
                                    plugin=plugin, user_id=user_id)

    @classmethod
    async def check_user_cool_down_event(cls, plugin: str, user_id: int) -> Result.IntResult:
        """
        :return:
            result = 2: Success with CoolDown Event expired
            result = 1: Success with CoolDown Event exist
            result = 0: Success with CoolDown Event not found
            result = -1: Error
        """
        return await cls._check_event(cls.user_type, plugin=plugin, user_id=user_id)

    @classmethod
    async def clear_time_out_event(cls) -> Result.DictResult:
        """Delete every expired cool-down event; collect per-event failures."""
        async_session = BaseDB().get_async_session()
        async with async_session() as session:
            async with session.begin():
                session_result = await session.execute(
                    select(CoolDownEvent).order_by(CoolDownEvent.id)
                )
                events = session_result.scalars().all()
            failed_events = []
            for event in events:
                try:
                    # Per-event commit so one bad row does not abort the sweep.
                    if datetime.now() >= event.stop_at:
                        await session.delete(event)
                        await session.commit()
                except Exception as e:
                    await session.rollback()
                    failed_events.append((event, e))
                    continue
        return Result.DictResult(error=False, info='Tasks completed', result={'all': events, 'failed': failed_events})
| 49.112853
| 118
| 0.538074
| 1,492
| 15,667
| 5.483914
| 0.067024
| 0.080665
| 0.092398
| 0.114397
| 0.916402
| 0.913713
| 0.911635
| 0.89868
| 0.89868
| 0.887191
| 0
| 0.006171
| 0.379396
| 15,667
| 318
| 119
| 49.267296
| 0.835339
| 0
| 0
| 0.822394
| 0
| 0
| 0.040796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023166
| 0
| 0.07722
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2bf2e8562de0753d1a55ffc2fc794f023a99ed51
| 46,158
|
py
|
Python
|
packs/chatops_tests/actions/test_aliases_with_slack.py
|
winem/st2tests
|
1d52733bb2b51c9e0cdcdec5759b56c9822cbdd1
|
[
"Apache-2.0"
] | 4
|
2015-08-26T12:06:30.000Z
|
2017-11-04T16:15:07.000Z
|
packs/chatops_tests/actions/test_aliases_with_slack.py
|
winem/st2tests
|
1d52733bb2b51c9e0cdcdec5759b56c9822cbdd1
|
[
"Apache-2.0"
] | 90
|
2015-06-06T01:16:20.000Z
|
2021-10-30T12:10:39.000Z
|
packs/chatops_tests/actions/test_aliases_with_slack.py
|
winem/st2tests
|
1d52733bb2b51c9e0cdcdec5759b56c9822cbdd1
|
[
"Apache-2.0"
] | 14
|
2015-06-15T01:48:04.000Z
|
2022-01-06T03:23:45.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
import os
import time
import unittest2
from slackclient import SlackClient
# REQUIRED environment variables:
# * WEBSOCKET_CLIENT_CA_BUNDLE
# - Should be set to:
# /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
# for RHEL7 systems
# - Unnecessary for systems with Python 2.7.9+ (eg: Ubuntu 16.04 and later)
# - Not directly used by this script, it is used to specify the certificate
# bundle for root certificates loaded by the websocket Python package
# * SLACK_CHANNEL
# - the Slack channel to connect to
# * SLACK_BOT_USERNAME
# - the Slack username for the StackStorm bot
# - this should be set to the same username as the SLACK_BOT_API_TOKEN
# * SLACK_USER_USERNAME
# - the Slack username for the Python script impersonating a user
# - this should be set to the same username as the SLACK_USER_API_TOKEN Slackbot, below
# * SLACK_USER_API_TOKEN
# - the Slack API token for the Python script that impersonates a user
# - THIS MUST BE DIFFERENT THAN SLACK_BOT_API_TOKEN
# OPTIONAL environment variables:
#
# * SLACK_WAIT_FOR_MESSAGES_TIMEOUT
# - Should be set to the number of seconds it is guaranteed to take the ST2
# IUT to respond
# - Used to timeout while waiting for responses, and used to wait long enough
# to assume a non-response for tests that don't expect responses
# - Default: 120
def ignore_username(userid):
    """Return a predicate keeping only 'message' events not sent by *userid*.

    Non-'message' events (e.g. 'user_typing', which is almost certainly
    caused by a human typing in the channel) are dropped, since they would
    erroneously inflate the number of messages.
    """
    def filter_messages(message):
        if message['type'] != 'message':
            return False
        return message.get('user') != userid
    return filter_messages
class SlackEndToEndTestCase(unittest2.TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
    """Read Slack settings from the environment, connect the user-impersonating client and announce the run."""
    cls.WAIT_FOR_MESSAGES_TIMEOUT = int(os.environ.get('SLACK_WAIT_FOR_MESSAGES_TIMEOUT', 120))
    cls.SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
    cls.SLACK_BOT_USERNAME = os.environ['SLACK_BOT_USERNAME']
    cls.SLACK_USER_API_TOKEN = os.environ['SLACK_USER_API_TOKEN']
    cls.SLACK_USER_USERNAME = os.environ['SLACK_USER_USERNAME']
    # This token is for the bot that impersonates a user
    cls.client = SlackClient(connect=True, token=cls.SLACK_USER_API_TOKEN)
    cls.channel = cls.SLACK_CHANNEL
    cls.bot_username = cls.SLACK_BOT_USERNAME
    cls.username = cls.SLACK_USER_USERNAME
    cls.userid = cls.get_user_id(cls.username)
    # Wrapped in staticmethod() so `self.filter` yields the bare predicate
    # instead of a bound method.
    cls.filter = staticmethod(ignore_username(cls.userid))
    cls.client.api_call(
        "chat.postMessage",
        channel=cls.channel,
        text="`===== BEGINNING ChatOps End-to-End Tests =====`",
        as_user=True)
    # Connect as the bot
    cls.client.rtm_connect()
@classmethod
def tearDownClass(cls):
    """Post a closing banner to the Slack channel after the suite finishes."""
    cls.client.api_call(
        "chat.postMessage",
        channel=cls.channel,
        text="`===== FINISHED ChatOps End-to-End Tests =====`",
        as_user=True)
@classmethod
def get_user_id(cls, username):
    """Resolve a Slack real_name to its user id via users.list.

    Returns None implicitly when no member matches.
    """
    for user in cls.client.api_call("users.list").get('members'):
        if user.get('real_name') == username:
            return user.get('id')
def test_non_response(self):
    """A plain chat message must not trigger any bot response."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="This message should not prompt a response from the bot",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): on Python 3 filter() returns a lazy, always-truthy
        # iterator, so the truthiness check below needs a real list.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertListEqual(messages, [])
    # NOTE(review): unreachable when the assertion above fails (it raises).
    if len(messages) != 0:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Drain the event buffer
    self.client.rtm_read()
def test_help_shortcut(self):
    """'!help' must produce two messages listing exactly 105 '![unused]' aliases."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!help",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Help commands for 'unused' action alias should return 105.
    combined_text = messages[0]['text'] + "\n" + messages[1]['text']
    number_of_unused_commands = len(list(filter(lambda line: line.startswith('![unused]'),
                                                combined_text.split('\n'))))
    self.assertEqual(number_of_unused_commands, 105)
    # Drain the event buffer
    self.client.rtm_read()
def test_help_longcut(self):
    """'@bot help' must produce two messages listing exactly 105 '![unused]' aliases."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="@{bot_user} help".format(bot_user=self.bot_username),
        as_user=True,
        link_names=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Help commands for 'unused' action alias should return 105.
    combined_text = messages[0]['text'] + "\n" + messages[1]['text']
    number_of_unused_commands = len(list(filter(lambda line: line.startswith('![unused]'),
                                                combined_text.split('\n'))))
    self.assertEqual(number_of_unused_commands, 105)
    # Drain the event buffer
    self.client.rtm_read()
def test_run_command_on_localhost(self):
    """'!remote run date on localhost' must ack and post a success attachment."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!remote run date on localhost",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    self.assertRegex(msg_text, r'web_url\s*:\s*')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    self.assertRegex(msg_text, r'result\s*:\s*')
    self.assertRegex(msg_text, r'localhost\s*:\s*')
    self.assertRegex(msg_text, r'stdout\s*:\s*')
    # Drain the event buffer
    self.client.rtm_read()
def test_run_command_on_localhost_with_bad_argument(self):
    """A command that fails format matching must produce one error attachment."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!pack get pack=example",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 1:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(1, len(messages))
    if len(messages) != 1:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for response
    self.assertIsNotNone(messages[0].get('bot_id'))
    self.assertIsNotNone(messages[0].get('attachments'))
    self.assertGreater(len(messages[0]['attachments']), 0)
    self.assertIsNotNone(messages[0]['attachments'][0].get('color'))
    self.assertEqual(messages[0]['attachments'][0]['color'], 'F35A00')
    self.assertIsNotNone(messages[0]['attachments'][0].get('text'))
    # Check the pretext
    msg_pretext = messages[0]['attachments'][0]['pretext']
    self.assertRegex(msg_pretext, r"<@{userid}>: I'm sorry, Dave. I'm afraid I can't do that. ".format(userid=self.userid))
    # Test attachment
    msg_text = messages[0]['attachments'][0]['text']
    self.assertRegex(msg_text, r"Command \"pack get pack=example\" doesn't match format string \"pack get \{\{ pack \}\}\"")
    # Drain the event buffer
    self.client.rtm_read()
def test_run_exact_command_on_localhost(self):
    """An exact quoted command must run on localhost and echo into stdout."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!remote run \"echo ChatOps run exact command on localhost\" on localhost",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    # This test depends a bit on the hubot-stackstorm adapter
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    self.assertRegex(msg_text, r'web_url\s*:\s*')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    self.assertRegex(msg_text, r'result\s*:\s*')
    self.assertRegex(msg_text, r'localhost\s*:\s*')
    self.assertRegex(msg_text, r'stdout\s*:\s*ChatOps run exact command on localhost')
    # Drain the event buffer
    self.client.rtm_read()
def test_run_exact_command_on_multiple_hosts(self):
    """An exact quoted command must run on both listed hosts and echo into each stdout."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!remote run \"echo ChatOps run exact command on multiple hosts\" on localhost,127.0.0.1",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    self.assertRegex(msg_text, r'web_url\s*:\s*')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    self.assertRegex(msg_text, r'result\s*:\s*')
    self.assertRegex(msg_text, r'localhost\s*:\s*\n\s*stdout\s*:\s*ChatOps run exact command on multiple hosts')
    self.assertRegex(msg_text, r'127.0.0.1\s*:\s*\n\s*stdout\s*:\s*ChatOps run exact command on multiple hosts')
    # Drain the event buffer
    self.client.rtm_read()
def test_run_command_on_default_hosts(self):
    """The '!default' alias must run its command against the alias's default hosts."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!default run \"echo ChatOps run command on default hosts\"",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_run_command_with_regex_and_default_parameter(self):
    """A regex-matched '!regex run' alias must execute with its default parameter."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!regex run \"echo ChatOps run command with regex\".",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_execute_command_with_regex_and_default_parameter(self):
    """A regex-matched '!regex execute' alias must execute with its default parameter."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!regex execute \"echo ChatOps execute command on default hosts\"!",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_run_command_with_extra_parameter(self):
    """The '!extra' alias must accept an extra trailing parameter (timeout=120)."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!extra run \"echo ChatOps run command with extra parameter\" on localhost timeout=120",
        as_user=True)
    messages = []
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        all_messages = self.client.rtm_read()
        # list(): Python 3 filter() is a lazy, always-truthy iterator.
        filtered_messages = list(filter(self.filter, all_messages))
        if filtered_messages:
            messages.extend(filtered_messages)
    self.assertEqual(2, len(messages))
    if len(messages) != 2:
        time.sleep(self.WAIT_FOR_MESSAGES_TIMEOUT)
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'], r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The time can be an integer or a float, and might contain non-ASCII
    # characters like mu (Unicode 03BC), which gets converted to \u03BC.
    # So instead of strictly specifying those, we have a very relaxed
    # regex to capture the execution duration.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_weird_run_remote_command_with_parameter(self):
    """A 'weird' multi-word alias form of core.remote is acked and succeeds."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!weird run remote command \"echo ChatOps run weird command\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_weird_run_remote_command_with_ssh(self):
    """The 'ssh to hosts ... and run command' alias wording is acked and succeeds."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!weird ssh to hosts localhost and run command \"echo ChatOps run weird command with SSH\"",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_weird_omg_just_run_command(self):
    """The free-form 'OMG st2 just run this command' alias wording is acked and succeeds."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!weird OMG st2 just run this command \"echo ChatOps run weird OMG command\" on ma boxes localhost already",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_custom_ack(self):
    """An alias with a custom ack message replies with that exact ack text."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!custom-ack run \"echo ChatOps run command with custom ack\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for response: the ack must carry the custom text, not the default one.
    self.assertIsNotNone(messages[0].get('bot_id'))
    self.assertEqual(messages[0].get('text'), 'Running the command(s) for you')
    # Drain the event buffer
    self.client.rtm_read()
def test_disabled_ack(self):
    """With the ack disabled, only the execution result message is posted."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!disabled-custom-ack run \"echo ChatOps run command with disabled ack\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until the single result message arrives (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 1:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(1, len(messages))
    # Test for response
    self.assertIsNotNone(messages[0].get('bot_id'))
    self.assertIsNotNone(messages[0].get('attachments'))
    self.assertGreater(len(messages[0]['attachments']), 0)
    self.assertIsNotNone(messages[0]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[0]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[0]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # Drain the event buffer
    self.client.rtm_read()
def test_disabled_ack_with_bad_command(self):
    """A failing remote command (no ack) reports failed status, stderr and return code."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!disabled-custom-ack run \"echof ChatOps run bad command\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until the single result message arrives (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 1:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(1, len(messages))
    # Test for response
    self.assertIsNotNone(messages[0].get('bot_id'))
    self.assertIsNotNone(messages[0].get('attachments'))
    self.assertGreater(len(messages[0]['attachments']), 0)
    self.assertIsNotNone(messages[0]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[0]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test attachment
    msg_text = messages[0]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.remote completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*failed')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # 'echof' does not exist, so the shell must report it on stderr with a
    # non-zero return code.
    self.assertRegex(msg_text, r'stderr\s*:.*sh:.*echof:.*not found')
    self.assertRegex(msg_text, r'return_code\s*:\s*\d+')
    # Drain the event buffer
    self.client.rtm_read()
def test_alias_with_custom_result_format(self):
    """An alias with a custom result format renders exactly the templated text."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!custom-format run \"echo ChatOps run command with custom result format\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test fallback: it must mirror the attachment text
    self.assertEqual(messages[1]['attachments'][0]['text'],
                     messages[1]['attachments'][0]['fallback'])
    # Test attachment: single host, so the exact rendered output is predictable
    msg_text = messages[1]['attachments'][0]['text']
    expected_text = ('Ran command `echo ChatOps run command with custom result format` on `1` host.\n'
                     '\n'
                     'Details are as follows:\n'
                     'Host: `localhost`\n'
                     ' ---> stdout: ChatOps run command with custom result format\n'
                     ' ---> stderr: \n')
    self.assertEqual(msg_text, expected_text)
    # Drain the event buffer
    self.client.rtm_read()
def test_alias_with_custom_result_format_and_multiple_hosts(self):
    """A custom-format alias over two hosts reports per-host sections (order-independent)."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!custom-format run \"echo ChatOps run command with custom result format on multiple hosts\" on localhost,127.0.0.1",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext
    self.assertRegex(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>'.format(userid=self.userid))
    # Test fallback: it must mirror the attachment text
    self.assertEqual(messages[1]['attachments'][0]['text'],
                     messages[1]['attachments'][0]['fallback'])
    # Test attachment: host ordering in the report is not guaranteed, so use
    # substring checks instead of comparing the whole rendered message.
    msg_text = messages[1]['attachments'][0]['text']
    expected_report = 'Ran command `echo ChatOps run command with custom result format on multiple hosts` on `2` hosts.\n'
    expected_details = 'Details are as follows:\n'
    expected_127_0_0_1 = ('Host: `127.0.0.1`\n'
                          ' ---> stdout: ChatOps run command with custom result format on multiple hosts\n'
                          ' ---> stderr: \n')
    expected_localhost = ('Host: `localhost`\n'
                          ' ---> stdout: ChatOps run command with custom result format on multiple hosts\n'
                          ' ---> stderr: \n')
    self.assertIn(expected_report, msg_text)
    self.assertIn(expected_details, msg_text)
    self.assertIn(expected_127_0_0_1, msg_text)
    self.assertIn(expected_localhost, msg_text)
    # Drain the event buffer
    self.client.rtm_read()
def test_alias_with_disabled_result(self):
    """An alias with the result disabled posts the ack and nothing else."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!disabled-result run \"echo ChatOps run command with disabled result\" on localhost",
        as_user=True)
    messages = []
    # Deliberately wait the full timeout: we are asserting that a second
    # (result) message does _not_ arrive. Break early only if one does show
    # up, so the failure is reported promptly.
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(1, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Drain the event buffer
    self.client.rtm_read()
def test_attachment_and_plaintext_backup(self):
    """An alias with both plaintext and attachment output uses the text as the fallback."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!plaintext-and-attachment run \"echo ChatOps run exact command with custom result format with plaintext and attachment\" on localhost",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext (exact match, since the alias customises it)
    self.assertEqual(messages[1]['attachments'][0]['pretext'],
                     '<@{userid}>: action completed! '.format(userid=self.userid))
    # Test attachment: the plaintext fallback mirrors the attachment text
    self.assertEqual(messages[1]['attachments'][0]['fallback'],
                     messages[1]['attachments'][0]['text'])
    # Drain the event buffer
    self.client.rtm_read()
def test_fields_parameter(self):
    """An alias result with custom fields renders pretext, fields, image and color."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!kitten pic",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext (exact match, since the alias customises it)
    self.assertEqual(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>: your kittens are here! '.format(userid=self.userid))
    # Test fallback: it must mirror the attachment text
    self.assertEqual(messages[1]['attachments'][0]['fallback'],
                     messages[1]['attachments'][0]['text'])
    # Test attachment body, structured fields, image and color
    self.assertEqual(messages[1]['attachments'][0]['text'], ' Regards from the Box Kingdom.')
    self.assertEqual(messages[1]['attachments'][0]['fields'],
                     [
                         {
                             'short': True,
                             'title': 'Kitten headcount',
                             'value': 'Eight.',
                         },
                         {
                             'short': True,
                             'title': 'Number of boxes',
                             'value': 'A bunch',
                         },
                     ])
    self.assertEqual(messages[1]['attachments'][0]['image_url'], 'http://i.imgur.com/Gb9kAYK.jpg')
    self.assertEqual(messages[1]['attachments'][0]['color'], '00AA00')
    # Drain the event buffer
    self.client.rtm_read()
def test_jinja_input_parameters(self):
    """A Jinja-templated alias parameter (the color) flows through to the attachment."""
    self.client.api_call(
        "chat.postMessage",
        channel=self.channel,
        text="!say Hello in #88CCEE",
        as_user=True)
    messages = []
    # Poll the RTM stream until both the ack and the result arrive (or we time out).
    for _ in range(self.WAIT_FOR_MESSAGES_TIMEOUT):
        if len(messages) >= 2:
            break
        time.sleep(1)
        # filter() is lazy on Python 3 (always truthy), so extend directly.
        messages.extend(filter(self.filter, self.client.rtm_read()))
    self.assertEqual(2, len(messages))
    # Test for ack
    self.assertIn("details available at", messages[0]['text'])
    # Test for response
    self.assertIsNotNone(messages[1].get('bot_id'))
    self.assertIsNotNone(messages[1].get('attachments'))
    self.assertGreater(len(messages[1]['attachments']), 0)
    self.assertIsNotNone(messages[1]['attachments'][0].get('text'))
    # Check the pretext (exact match)
    self.assertEqual(messages[1]['attachments'][0]['pretext'],
                     r'<@{userid}>: '.format(userid=self.userid))
    # Test fallback: it must mirror the attachment text
    self.assertEqual(messages[1]['attachments'][0]['fallback'],
                     messages[1]['attachments'][0]['text'])
    # Test attachment
    msg_text = messages[1]['attachments'][0]['text']
    self.assertRegex(msg_text, r'Action core\.noop completed\.')
    self.assertRegex(msg_text, r'status\s*:\s*succeeded')
    self.assertRegex(msg_text, r'execution\s*:\s*[0-9a-fA-F]{24}')
    # The duration may be int or float and may contain non-ASCII characters
    # like mu (U+03BC), so only loosely match the duration line.
    self.assertRegex(msg_text, r'Took \d+.*s to complete\.')
    # The color requested in the message must have been templated through.
    self.assertEqual(messages[1]['attachments'][0]['color'], '88CCEE')
    # Drain the event buffer
    self.client.rtm_read()
# Expose the test suite as a StackStorm action when st2common is importable;
# in a plain unit-test environment (no st2 on the path) the ImportError is
# swallowed and this action class is simply not defined.
try:
    from st2common.runners.base_action import Action

    class SlackEndToEndTestAction(Action):
        """st2 action entry point that runs the Slack end-to-end suite."""

        def run(self, *args, **kwargs):
            # Build and execute the suite programmatically so the result
            # object propagates back through the action runner.
            suite = unittest2.TestLoader().loadTestsFromTestCase(SlackEndToEndTestCase)
            return unittest2.TextTestRunner().run(suite)
except ImportError:
    pass
# Allow running the suite directly with `python <file>`, outside of st2.
if __name__ == '__main__':
    unittest2.main()
| 38.625941
| 153
| 0.610772
| 5,564
| 46,158
| 4.934759
| 0.064881
| 0.036712
| 0.053174
| 0.055833
| 0.878792
| 0.860837
| 0.848272
| 0.842044
| 0.833813
| 0.833813
| 0
| 0.01529
| 0.270289
| 46,158
| 1,194
| 154
| 38.658291
| 0.799893
| 0.142099
| 0
| 0.811575
| 0
| 0.005384
| 0.160514
| 0.021669
| 0
| 0
| 0
| 0
| 0.292059
| 1
| 0.039031
| false
| 0.001346
| 0.009421
| 0
| 0.060565
| 0.001346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ecf8c326b8cacff903228286c6469dfe3705476b
| 169
|
py
|
Python
|
app/views/__init__.py
|
sangjeedondrub/git-webhook
|
e272d4cc7c3961ef8d0e33a317fa282047e56fe4
|
[
"MIT"
] | 2
|
2018-07-30T05:51:00.000Z
|
2019-06-19T11:15:11.000Z
|
app/views/__init__.py
|
ncuhome/git-webhook
|
c33bbf99502fea46c6ceed1ec45a48069c533f1a
|
[
"MIT"
] | null | null | null |
app/views/__init__.py
|
ncuhome/git-webhook
|
c33bbf99502fea46c6ceed1ec45a48069c533f1a
|
[
"MIT"
] | 2
|
2016-11-21T02:38:25.000Z
|
2019-06-19T11:15:24.000Z
|
# -*- coding: utf-8 -*-
from app.views import common
from app.views import webhook
from app.views import server
from app.views import history
from app.views import api
| 21.125
| 29
| 0.763314
| 28
| 169
| 4.607143
| 0.428571
| 0.271318
| 0.465116
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.153846
| 169
| 7
| 30
| 24.142857
| 0.895105
| 0.12426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a63e8f5d7066d5dbb4a2743e902064af918ee663
| 113
|
py
|
Python
|
util/util.py
|
calebcodesgud/Crypto
|
f56b29390f56bb7c9caef09a25012631ab890164
|
[
"MIT"
] | null | null | null |
util/util.py
|
calebcodesgud/Crypto
|
f56b29390f56bb7c9caef09a25012631ab890164
|
[
"MIT"
] | null | null | null |
util/util.py
|
calebcodesgud/Crypto
|
f56b29390f56bb7c9caef09a25012631ab890164
|
[
"MIT"
] | null | null | null |
def ISO_date_reformat(min_date, max_date):
    """Strip the time portion from a pair of date(-time) values.

    Each value is stringified and truncated at the first space, so
    "YYYY-MM-DD HH:MM:SS" becomes "YYYY-MM-DD"; a value with no space
    (e.g. a bare date) passes through unchanged. Returns a tuple of two
    strings, (min, max).
    """
    def _date_part(value):
        # partition(' ')[0] is everything before the first space — the
        # whole string when there is no space at all.
        return f'{value}'.partition(' ')[0]

    return _date_part(min_date), _date_part(max_date)
| 37.666667
| 67
| 0.637168
| 19
| 113
| 3.473684
| 0.526316
| 0.212121
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020202
| 0.123894
| 113
| 3
| 67
| 37.666667
| 0.646465
| 0
| 0
| 0
| 0
| 0
| 0.198198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
a66ba3bf5edbfaa0fb8e44a0e81c68bc0992b3ec
| 3,676
|
py
|
Python
|
tests/rules/test_protocol.py
|
bockstaller/europarl-crawler
|
5d4497da068cfe5dc10d250b232b0473821b4877
|
[
"MIT"
] | null | null | null |
tests/rules/test_protocol.py
|
bockstaller/europarl-crawler
|
5d4497da068cfe5dc10d250b232b0473821b4877
|
[
"MIT"
] | null | null | null |
tests/rules/test_protocol.py
|
bockstaller/europarl-crawler
|
5d4497da068cfe5dc10d250b232b0473821b4877
|
[
"MIT"
] | null | null | null |
from datetime import date
import pytest
from europarl.rules.protocol import ProtocolEnHtmlRule, ProtocolEnPdfRule
# One case per parliamentary term (plus 1950/2025, which fall outside every
# known term and map to term 0). The URLs are generated from the (term, year)
# pairs instead of being written out eleven times.
@pytest.mark.parametrize(
    "date,expected",
    [
        (
            date(year=year, month=8, day=1),
            "https://europarl.europa.eu/doceo/document/"
            "PV-{term}-{year}-08-01_EN.pdf".format(term=term, year=year),
        )
        for term, year in [
            (9, 2019), (8, 2014), (7, 2009), (6, 2004), (5, 1999),
            (4, 1994), (3, 1989), (2, 1984), (1, 1979), (0, 1950),
            (0, 2025),
        ]
    ],
)
def test_get_url_protocol_en_pdf(date, expected):
    """The English PDF protocol URL embeds the term matching the sitting date."""
    assert ProtocolEnPdfRule.url(date=date) == expected
# Same (term, year) coverage as the PDF test, but for the .html document URL.
@pytest.mark.parametrize(
    "date,expected",
    [
        (
            date(year=year, month=8, day=1),
            "https://europarl.europa.eu/doceo/document/"
            "PV-{term}-{year}-08-01_EN.html".format(term=term, year=year),
        )
        for term, year in [
            (9, 2019), (8, 2014), (7, 2009), (6, 2004), (5, 1999),
            (4, 1994), (3, 1989), (2, 1984), (1, 1979), (0, 1950),
            (0, 2025),
        ]
    ],
)
def test_get_url_protocol_en_html(date, expected):
    """The English HTML protocol URL embeds the term matching the sitting date."""
    assert ProtocolEnHtmlRule.url(date) == expected
| 32.821429
| 80
| 0.53074
| 492
| 3,676
| 3.900407
| 0.107724
| 0.091714
| 0.103179
| 0.114643
| 0.885878
| 0.885878
| 0.82543
| 0.82543
| 0.82543
| 0.82543
| 0
| 0.127364
| 0.295158
| 3,676
| 111
| 81
| 33.117117
| 0.613277
| 0
| 0
| 0.47619
| 0
| 0.209524
| 0.39309
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 1
| 0.019048
| false
| 0
| 0.028571
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a6e63fbfeb3d6ad5bd2ca0b65c9e93042ef5cbb7
| 9,923
|
py
|
Python
|
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
import typing
from abaqusConstants import *
from .Load import Load
from ..Region.Region import Region
class ConcentratedForce(Load):
"""The ConcentratedForce object defines a concentrated force.
The ConcentratedForce object is derived from the Load object.
Attributes
----------
name: str
A String specifying the load repository key.
distributionType: SymbolicConstant
A SymbolicConstant specifying how the load is distributed spatially. Possible values are
UNIFORM and FIELD. The default value is UNIFORM.
follower: Boolean
A Boolean specifying whether the direction of the force rotates with the rotation at
each node of the region. You should provide the **follower** argument only if it is valid
for the specified step. The default value is OFF.
localCsys: int
None or a :py:class:`~abaqus.Datum.DatumCsys.DatumCsys` object specifying the local coordinate system of the load's degrees
of freedom. If **localCsys=None**, the degrees of freedom are defined in the global
coordinate system. When this member is queried, it returns an Int. The default value is
None.
field: str
A String specifying the name of the :py:class:`~abaqus.Field.AnalyticalField.AnalyticalField` object associated with this load.
The **field** argument applies only when **distributionType=FIELD**. The default value is an
empty string.
region: Region
A :py:class:`~abaqus.Region.Region.Region` object specifying the region to which the load is applied.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].loads[name]
"""
# A String specifying the load repository key.
name: str = ''
# A SymbolicConstant specifying how the load is distributed spatially. Possible values are
# UNIFORM and FIELD. The default value is UNIFORM.
distributionType: SymbolicConstant = UNIFORM
# A Boolean specifying whether the direction of the force rotates with the rotation at
# each node of the region. You should provide the *follower* argument only if it is valid
# for the specified step. The default value is OFF.
follower: Boolean = OFF
# None or a DatumCsys object specifying the local coordinate system of the load's degrees
# of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
# coordinate system. When this member is queried, it returns an Int. The default value is
# None.
localCsys: int = None
# A String specifying the name of the AnalyticalField object associated with this load.
# The *field* argument applies only when *distributionType*=FIELD. The default value is an
# empty string.
field: str = ''
# A Region object specifying the region to which the load is applied.
region: Region = Region()
def __init__(self, name: str, createStepName: str, region: Region,
distributionType: SymbolicConstant = UNIFORM, field: str = '', cf1: float = None,
cf2: float = None, cf3: float = None, amplitude: str = UNSET, follower: Boolean = OFF,
localCsys: int = None):
"""This method creates a ConcentratedForce object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].ConcentratedForce
Parameters
----------
name
A String specifying the load repository key.
createStepName
A String specifying the name of the step in which the load is created.
region
A Region object specifying the region to which the load is applied.
distributionType
A SymbolicConstant specifying how the load is distributed spatially. Possible values are
UNIFORM and FIELD. The default value is UNIFORM.
field
A String specifying the name of the AnalyticalField object associated with this load.
The *field* argument applies only when *distributionType*=FIELD. The default value is an
empty string.
cf1
A Float or a Complex specifying the concentrated force component in the 1-direction.
Although *cf1*, *cf2*, and *cf3* are optional arguments, at least one of them must be
nonzero.
cf2
A Float or a Complex specifying the concentrated force component in the 2-direction.
cf3
A Float or a Complex specifying the concentrated force component in the 3-direction.
amplitude
A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
UNSET should be used if the load has no amplitude reference. The default value is UNSET.
You should provide the *amplitude* argument only if it is valid for the specified step.
follower
A Boolean specifying whether the direction of the force rotates with the rotation at
each node of the region. You should provide the *follower* argument only if it is valid
for the specified step. The default value is OFF.
localCsys
None or a DatumCsys object specifying the local coordinate system of the load's degrees
of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
coordinate system. When this member is queried, it returns an Int. The default value is
None.
Returns
-------
A ConcentratedForce object.
"""
super().__init__()
pass
def setValues(self, distributionType: SymbolicConstant = UNIFORM, field: str = '', cf1: float = None,
cf2: float = None, cf3: float = None, amplitude: str = UNSET, follower: Boolean = OFF,
localCsys: int = None):
"""This method modifies the data for an existing ConcentratedForce object in the step where
it is created.
Parameters
----------
distributionType
A SymbolicConstant specifying how the load is distributed spatially. Possible values are
UNIFORM and FIELD. The default value is UNIFORM.
field
A String specifying the name of the AnalyticalField object associated with this load.
The *field* argument applies only when *distributionType*=FIELD. The default value is an
empty string.
cf1
A Float or a Complex specifying the concentrated force component in the 1-direction.
Although *cf1*, *cf2*, and *cf3* are optional arguments, at least one of them must be
nonzero.
cf2
A Float or a Complex specifying the concentrated force component in the 2-direction.
cf3
A Float or a Complex specifying the concentrated force component in the 3-direction.
amplitude
A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
UNSET should be used if the load has no amplitude reference. The default value is UNSET.
You should provide the *amplitude* argument only if it is valid for the specified step.
follower
A Boolean specifying whether the direction of the force rotates with the rotation at
each node of the region. You should provide the *follower* argument only if it is valid
for the specified step. The default value is OFF.
localCsys
None or a DatumCsys object specifying the local coordinate system of the load's degrees
of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
coordinate system. When this member is queried, it returns an Int. The default value is
None.
"""
pass
def setValuesInStep(self, stepName: str,
                    cf1: typing.Union[SymbolicConstant, float] = None,
                    cf2: typing.Union[SymbolicConstant, float] = None,
                    cf3: typing.Union[SymbolicConstant, float] = None,
                    amplitude: str = ''):
    """Modify the propagating data of an existing ConcentratedForce object
    in the given step.
    Parameters
    ----------
    stepName
        Name of the step in which the load is modified.
    cf1
        Float, Complex, or the SymbolicConstant UNCHANGED for the force
        component in the 1-direction; UNCHANGED propagates the value from
        the previous analysis step.
    cf2
        Float, Complex, or the SymbolicConstant UNCHANGED for the force
        component in the 2-direction; UNCHANGED propagates the value from
        the previous analysis step.
    cf3
        Float, Complex, or the SymbolicConstant UNCHANGED for the force
        component in the 3-direction; UNCHANGED propagates the value from
        the previous analysis step.
    amplitude
        Name of the amplitude reference, or one of the SymbolicConstants
        UNCHANGED (propagate from the previous step) and FREED (remove the
        amplitude reference). Provide this argument only if it is valid for
        the specified step.
    """
    pass
| 49.368159
| 135
| 0.657059
| 1,245
| 9,923
| 5.230522
| 0.121285
| 0.055897
| 0.041462
| 0.04699
| 0.834152
| 0.816646
| 0.809429
| 0.777948
| 0.751536
| 0.751075
| 0
| 0.004722
| 0.295777
| 9,923
| 200
| 136
| 49.615
| 0.927161
| 0.762068
| 0
| 0.259259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.111111
| 0.148148
| 0
| 0.518519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
a6ec5503877df686540d013092246767da80bd92
| 81
|
py
|
Python
|
1_languages/python/src/operators_comparison.py
|
praisetompane/3_programming
|
dd3e2e89a36a613d895fdbdd9c03845cb648fddf
|
[
"MIT"
] | null | null | null |
1_languages/python/src/operators_comparison.py
|
praisetompane/3_programming
|
dd3e2e89a36a613d895fdbdd9c03845cb648fddf
|
[
"MIT"
] | null | null | null |
1_languages/python/src/operators_comparison.py
|
praisetompane/3_programming
|
dd3e2e89a36a613d895fdbdd9c03845cb648fddf
|
[
"MIT"
] | null | null | null |
# Demonstrate Python's comparison operators: each expression yields a bool.
lhs, rhs = 3, 2
print(lhs > rhs)   # True
print(lhs >= rhs)  # True
print(lhs < rhs)   # False
print(lhs <= rhs)  # False
print(lhs == rhs)  # False
print(lhs != rhs)  # True
| 13.5
| 13
| 0.518519
| 18
| 81
| 2.333333
| 0.166667
| 0.857143
| 1
| 1.428571
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.1875
| 0.209877
| 81
| 6
| 14
| 13.5
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 14
|
472e0fd8384cd046d555ba2b796972cb88a4e15b
| 5,991
|
py
|
Python
|
example_main.py
|
Rdataflow/pyFINT
|
f31a523ad21fc45c2b4480a1bd75263d99201848
|
[
"MIT"
] | null | null | null |
example_main.py
|
Rdataflow/pyFINT
|
f31a523ad21fc45c2b4480a1bd75263d99201848
|
[
"MIT"
] | 2
|
2021-12-21T08:20:36.000Z
|
2022-01-26T15:39:57.000Z
|
example_main.py
|
Rdataflow/pyFINT
|
f31a523ad21fc45c2b4480a1bd75263d99201848
|
[
"MIT"
] | 1
|
2022-01-25T21:35:37.000Z
|
2022-01-25T21:35:37.000Z
|
######################################################################
# Copyright (C) 2021 ecorisQ
# Use of this source code is governed by an MIT-style license that can be found in the LICENSE
# file or at https://opensource.org/licenses/MIT.
#
# Author: Christoph Schaller, BFH-HAFL, December 2020
#
# Script for demonstrating the use of pyFINT. The script expects
# input rasters of 1m resolution
######################################################################
import os
from datetime import timedelta
import time
from pyfintcontroller import *
# Default entry point
if __name__ == "__main__":
    start_time = time.time()
    # Expected input resolution is 1m.
    # Path to output folder
    working_dir = os.path.join(os.getcwd(), "output")
    # Path to input raster: Vegetation Height Model / Normalised Surface Model
    nsm_file = os.path.join(os.getcwd(), "sample_data/VHM_1m.tif")

    def run_detection(resize_resolution=None, gauss_filter=None):
        """Configure a pyFintController with the shared defaults and run one
        tree detection; optionally resize the VHM and/or apply a Gauss filter.

        resize_resolution: (resolution, method) tuple, e.g. (1.5, "bilinear").
            Supported methods basing on gdal: ["near", "bilinear", "cubic",
            "cubicspline", "lanczos", "average", "mode", "max", "min", "med",
            "q1", "q3"]
        gauss_filter: (size, sigma) tuple; size (the radius) must be odd.
        """
        fint_controller = pyFintController()
        fint_controller.set_working_dir(working_dir)
        # Whether to allow the use of altitude in DBH calculation (requires DTM)
        fint_controller.m_altitude_allowed = False
        # NSM/VHM used for detection
        fint_controller.set_normalized_model_file_name(nsm_file, None)
        # Set the function for calculating the DBH, whether to allow altitude in calculation
        fint_controller.set_dbh_function("2.52*H^0.84", False)
        # Whether to randomize the DBH value and the degree of deviation in percent
        fint_controller.set_diameter_randomization(False, 20)
        # Minimum height of a pixel to be considered for a local maximum
        fint_controller.set_minimum_height(1)
        # Minimum height for a detected maximum to be considered a tree
        fint_controller.set_minimum_detection_height(4)
        if resize_resolution is not None:
            # Resize the input to the specified resolution with the given method
            fint_controller.set_resize_resolution(*resize_resolution)
        if gauss_filter is not None:
            # Apply a Gauss filter of the given radius (odd) and strength
            fint_controller.set_gauss_filter(size=gauss_filter[0], sigma=gauss_filter[1])
        # Tell the controller to run the detection
        fint_controller.run_process()

    # Standard detection with 1m input VHM without resizing or filtering
    run_detection()
    # Detection with 1m input VHM resized to 1.5m
    run_detection(resize_resolution=(1.5, "bilinear"))
    # Detection with 1m input VHM and with Gauss filter sigma=2 and radius=3
    run_detection(gauss_filter=(3, 2))
    # Detection with resizing to 1.5m as well as Gauss filter sigma=2 and radius=3
    run_detection(resize_resolution=(1.5, "bilinear"), gauss_filter=(3, 2))
    print("TOTAL PROCESSING TIME: %s (h:min:sec)" % str(timedelta(seconds=(time.time() - start_time))))
| 47.547619
| 146
| 0.734101
| 869
| 5,991
| 4.897583
| 0.205984
| 0.131579
| 0.111842
| 0.045113
| 0.838111
| 0.824248
| 0.824248
| 0.824248
| 0.824248
| 0.824248
| 0
| 0.015593
| 0.175764
| 5,991
| 125
| 147
| 47.928
| 0.846294
| 0.519947
| 0
| 0.816327
| 0
| 0
| 0.049738
| 0.008227
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081633
| 0
| 0.081633
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
472fa965788c99514d37ebbd2567d0fa88f9cd88
| 133
|
py
|
Python
|
quest/plugins/base/__init__.py
|
sdc50/quest
|
95fee57e6fb177c4d32c5e6cffbde61333f81b7d
|
[
"BSD-3-Clause"
] | 12
|
2018-03-26T19:59:54.000Z
|
2022-02-02T01:21:09.000Z
|
quest/plugins/base/__init__.py
|
sdc50/quest
|
95fee57e6fb177c4d32c5e6cffbde61333f81b7d
|
[
"BSD-3-Clause"
] | 110
|
2018-02-08T19:56:15.000Z
|
2019-05-30T20:55:09.000Z
|
quest/plugins/base/__init__.py
|
sdc50/quest
|
95fee57e6fb177c4d32c5e6cffbde61333f81b7d
|
[
"BSD-3-Clause"
] | 10
|
2018-02-08T20:31:43.000Z
|
2020-08-05T18:45:01.000Z
|
from .tool_base import *
from .io_base import *
from .provider_base import *
from .publish_base import *
from .service_base import *
| 22.166667
| 28
| 0.774436
| 20
| 133
| 4.9
| 0.4
| 0.510204
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150376
| 133
| 5
| 29
| 26.6
| 0.867257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5b50f4a9d7e41fb807c3398da8f229fdcec68391
| 30,185
|
py
|
Python
|
VNN/experiment/model.py
|
arthuraleksandrovich/vector_ann
|
35158b5f9741646c362ef2c069be3503186975c9
|
[
"MIT"
] | null | null | null |
VNN/experiment/model.py
|
arthuraleksandrovich/vector_ann
|
35158b5f9741646c362ef2c069be3503186975c9
|
[
"MIT"
] | null | null | null |
VNN/experiment/model.py
|
arthuraleksandrovich/vector_ann
|
35158b5f9741646c362ef2c069be3503186975c9
|
[
"MIT"
] | null | null | null |
"""Models for experimenting"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from network import vlayers
from network import vlayers_conv
def get_scalar_model(dataset_shapes, hidden_layer_units=(2,), activation='relu', output_activation=None,
                     kernel_initializer='random_normal', bias_initializer='random_normal',
                     optimizer=None, loss=None, metrics=None):
    """Scalar network, standard tensorflow implementation.

    dataset_shapes is (input_dims, output_dims); output_dims[-1] gives the
    number of output units. output_activation falls back to *activation*
    when None. optimizer/loss/metrics default to fresh RMSprop / MSE
    objects created per call — the old defaults instantiated one stateful
    Keras object at definition time and shared it across every model
    built by this function. Returns a compiled keras.Model.
    """
    # Build fresh stateful objects per call instead of sharing defaults.
    if optimizer is None:
        optimizer = keras.optimizers.RMSprop()
    if loss is None:
        loss = keras.losses.MeanSquaredError()
    if metrics is None:
        metrics = [keras.metrics.MeanSquaredError()]
    if output_activation is None:
        output_activation = activation
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    inputs = keras.Input(shape=input_dims)
    x = inputs
    for h in hidden_layer_units:
        x = layers.Dense(h, activation=activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    outputs = layers.Dense(output_dims[-1], activation=output_activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics
    )
    return model
def get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=(2,), inner_hidden_layer_units=(2,),
                     activation='relu', output_activation=None,
                     weight_type="unique", weight_initializer='random_normal',
                     optimizer=None, loss=None, metrics=None):
    """Vector network built from vlayers; returns a compiled keras.Model.

    fractal_depth < 1 stacks plain VDense layers; otherwise VFractal layers
    with *inner_hidden_layer_units* inner networks are used.
    optimizer/loss/metrics default to fresh RMSprop / MSE objects per call
    (the old defaults shared one stateful Keras instance across calls).
    """
    if optimizer is None:
        optimizer = keras.optimizers.RMSprop()
    if loss is None:
        loss = keras.losses.MeanSquaredError()
    if metrics is None:
        metrics = [keras.metrics.MeanSquaredError()]
    if output_activation is None:
        output_activation = activation
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    inputs = keras.Input(shape=input_dims)
    x = vlayers.VInput(hidden_layer_units[0] if len(hidden_layer_units) > 0 else output_dims[-1], weight_initializer='random_normal')(inputs)
    if len(hidden_layer_units) > 0:
        for h in hidden_layer_units[1:] + (output_dims[-1],):
            if fractal_depth < 1:
                x = vlayers.VDense(h, activation=activation, weight_initializer=weight_initializer)(x)
            else:
                x = vlayers.VFractal(h, depth=fractal_depth, hidden_layer_units=inner_hidden_layer_units, activation=activation, weight_initializer=weight_initializer, weight_type=weight_type)(x)
    outputs = vlayers.VOutput(activation=output_activation)(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics
    )
    return model
def get_scalar_conv_model1(dataset_shapes, optimizer=None):
    """Scalar CNN #1: one 7x7 conv (output_dims[0] filters) + global max-pool.

    optimizer defaults to a fresh SGD per call; the old
    `optimizer=keras.optimizers.SGD()` default created one stateful
    instance at definition time and shared it across every model.
    Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    first_filter = 7
    inputs = keras.Input(shape=input_dims)
    x = layers.Conv2D(output_dims[0], first_filter,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(inputs)
    x = layers.ReLU()(x)
    # Pool over the whole remaining spatial extent (global max-pool).
    x = layers.MaxPool2D(pool_size=(input_dims[0]-first_filter+1, input_dims[1]-first_filter+1), strides=(1,1), padding='valid')(x)
    x = layers.Activation('sigmoid')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_vector_conv_model1(dataset_shapes, shared_inner_nets, optimizer=None):
    """Vector CNN #1 (vlayers_conv counterpart of get_scalar_conv_model1).

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    first_filter = 7
    inputs = keras.Input(shape=input_dims)
    x = vlayers_conv.VInputConv((first_filter,first_filter),
                                num_filters=output_dims[0],
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(inputs)
    x = vlayers_conv.VConvFractal((input_dims[0]-first_filter+1, input_dims[1]-first_filter+1),
                                  kernel_type="pooling",
                                  strides=(1,1),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_scalar_conv_model2(dataset_shapes, optimizer=None):
    """Scalar CNN #2: two conv layers (3x3 then 5x5) + global max-pool.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    first_filter = 3
    second_filter = 5
    # Remaining spatial extent after the two 'valid' convolutions.
    third_filter = input_dims[0] - first_filter - second_filter + 2
    first_filter_num = 5
    inputs = keras.Input(shape=input_dims)
    x = layers.Conv2D(first_filter_num, first_filter,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(inputs)
    x = layers.ReLU()(x)
    x = layers.Conv2D(output_dims[0], second_filter,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=(third_filter,third_filter), strides=(1,1), padding='valid')(x)
    x = layers.Activation('sigmoid')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_vector_conv_model2(dataset_shapes, shared_inner_nets, optimizer=None):
    """Vector CNN #2 (vlayers_conv counterpart of get_scalar_conv_model2).

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Create model
    first_filter = 3
    second_filter = 5
    # Remaining spatial extent after the two 'valid' convolutions.
    third_filter = input_dims[0] - first_filter - second_filter + 2
    first_filter_num = 5
    inputs = keras.Input(shape=input_dims)
    x = vlayers_conv.VInputConv((first_filter,first_filter),
                                num_filters=first_filter_num,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(inputs)
    x = vlayers_conv.VConvFractal((second_filter,second_filter),
                                  kernel_type="convolution",
                                  num_filters=output_dims[0],
                                  strides=(1,1),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    x = vlayers_conv.VConv((third_filter,third_filter),
                           kernel_type="pooling",
                           strides=(1,1),
                           padding_type='valid',
                           layer_type="convolution",
                           activation="relu"
                           )(x)
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_scalar_conv_model3(dataset_shapes, optimizer=None):
    """Scalar CNN #3: 4x4 average-pool downsample, conv, max-pool, conv.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Settings
    first_filter_num = 5
    first_filter_dim = 3
    second_filter_num = output_dims[0]
    second_filter_dim = ((input_dims[0] // 4) - (first_filter_dim - 1)) // 2
    inputs = keras.Input(shape=input_dims)
    # Decrease input twice
    x = layers.AveragePooling2D(pool_size=(4,4), strides=(4,4), padding='valid')(inputs)
    # First convolutional layer
    x = layers.Conv2D(first_filter_num, first_filter_dim,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # First pooling layer
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # Second convolutional layer
    x = layers.Conv2D(second_filter_num, second_filter_dim,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.Activation('softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_vector_conv_model3(dataset_shapes, shared_inner_nets, optimizer=None):
    """Vector CNN #3 (vlayers_conv counterpart of get_scalar_conv_model3).

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Settings
    first_filter_num = 5
    first_filter_dim = 3
    second_filter_num = output_dims[0]
    second_filter_dim = ((input_dims[0] // 4) - (first_filter_dim - 1)) // 2
    inputs = keras.Input(shape=input_dims)
    # Decrease input
    x = layers.AveragePooling2D(pool_size=(4,4), strides=(4,4), padding='valid')(inputs)
    # First convolutional layer
    x = vlayers_conv.VInputConv((first_filter_dim,first_filter_dim),
                                num_filters=first_filter_num,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2,2),
                                  kernel_type="pooling",
                                  strides=(2,2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # First pooling layer
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    # Second convolutional layer
    x = layers.Conv2D(second_filter_num, second_filter_dim,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.Activation('softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_scalar_conv_model4(dataset_shapes, optimizer=None):
    """Scalar CNN #4: 4x4 average-pool downsample, two conv layers, max-pool.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Settings
    first_filter_num = 5
    first_filter_dim = 3
    second_filter_num = output_dims[0]
    second_filter_dim = 3
    # Remaining spatial extent after downsampling and both convolutions.
    third_filter_dim = (input_dims[0] // 4) - (first_filter_dim - 1) - (second_filter_dim - 1)
    inputs = keras.Input(shape=input_dims)
    # Decrease input twice
    x = layers.AveragePooling2D(pool_size=(4,4), strides=(4,4), padding='valid')(inputs)
    # First convolutional layer
    x = layers.Conv2D(first_filter_num, first_filter_dim,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # Second convolutional layer
    x = layers.Conv2D(second_filter_num, second_filter_dim,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # First pooling layer
    x = layers.MaxPool2D(pool_size=(third_filter_dim,third_filter_dim), strides=(1,1), padding='valid')(x)
    x = layers.Activation('softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_vector_conv_model4(dataset_shapes, shared_inner_nets, optimizer=None):
    """Vector CNN #4 (vlayers_conv counterpart of get_scalar_conv_model4).

    Fixes a copy-paste bug in the original: the softmax Activation and
    Flatten layers were appended TWICE, so softmax was applied to an
    already-softmaxed tensor; a single application matches the scalar
    counterpart. optimizer defaults to a fresh SGD per call (fixes the
    shared stateful default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]
    output_dims = dataset_shapes[1]
    # Settings
    first_filter_num = 5
    first_filter_dim = 3
    second_filter_num = output_dims[0]
    second_filter_dim = 3
    # Remaining spatial extent after downsampling and both convolutions.
    third_filter_dim = (input_dims[0] // 4) - (first_filter_dim - 1) - (second_filter_dim - 1)
    inputs = keras.Input(shape=input_dims)
    # Decrease input
    x = layers.AveragePooling2D(pool_size=(4,4), strides=(4,4), padding='valid')(inputs)
    # First convolutional layer
    x = vlayers_conv.VInputConv((first_filter_dim,first_filter_dim),
                                num_filters=first_filter_num,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((second_filter_dim,second_filter_dim),
                                  num_filters=second_filter_num,
                                  kernel_type="convolution",
                                  strides=(1,1),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # Second convolutional layer
    x = vlayers_conv.VOutputConv(layer_type="convolution", activation="relu")(x)
    # First pooling layer
    x = layers.MaxPool2D(pool_size=(third_filter_dim,third_filter_dim), strides=(1,1), padding='valid')(x)
    x = layers.Activation('softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5(dataset_shapes, optimizer=None):
    """Modified and simplified LeNet-5.

    ReLU layers after each convolution layer are added;
    subsampling layers are replaced with MaxPool;
    C3 is a simple conv layer;
    in fully connected layers sigmoid is replaced with ReLU,
    Gaussian connection is replaced with softmax.
    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance).
    Unmodified model's source: Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner.
    Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11), pp. 2278–2324, 1998."""
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input up to the 32x32 extent LeNet-5 expects
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = layers.Conv2D(6, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # S2
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C3
    x = layers.Conv2D(16, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    # S4
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C5
    x = layers.Conv2D(120, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.Flatten()(x)
    # F6
    x = layers.Dense(84, activation='relu', kernel_initializer='random_normal', bias_initializer='random_normal')(x)
    # Output
    outputs = layers.Dense(output_dims[0], activation='softmax', kernel_initializer='random_normal', bias_initializer='random_normal')(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_without_fully_connected(dataset_shapes, optimizer=None):
    """LeNet-5 variant with the fully connected tail replaced by a final
    softmax convolution (C5 outputs output_dims[0] channels directly).

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = layers.Conv2D(6, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # S2
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C3
    x = layers.Conv2D(16, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    # S4
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C5
    x = layers.Conv2D(output_dims[0], 5,
                      activation='softmax',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal1(dataset_shapes, shared_inner_nets, optimizer=None):
    """LeNet-5 variant with C1/S2 replaced by vlayers_conv fractal layers.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance); dead commented-out code removed.
    Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1 (vector replacement for the scalar Conv2D+ReLU pair)
    x = vlayers_conv.VInputConv((5,5),
                                num_filters=6,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2,2),
                                  kernel_type="pooling",
                                  strides=(2,2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # S2 (vector replacement for the scalar MaxPool2D)
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    # C3
    x = layers.Conv2D(16, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    # S4
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C5
    x = layers.Conv2D(output_dims[0], 5,
                      activation='softmax',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal2(dataset_shapes, shared_inner_nets, optimizer=None):
    """LeNet-5 variant with C3/S4 replaced by vlayers_conv fractal layers.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance); dead commented-out code removed.
    Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = layers.Conv2D(6, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    x = layers.ReLU()(x)
    # S2
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C3 (vector replacement for the scalar Conv2D)
    x = vlayers_conv.VInputConv((5,5),
                                num_filters=16,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2,2),
                                  kernel_type="pooling",
                                  strides=(2,2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # S4 (vector replacement for the scalar MaxPool2D)
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    # C5
    x = layers.Conv2D(output_dims[0], 5,
                      activation='softmax',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal3(dataset_shapes, shared_inner_nets, optimizer=None):
    """Compressed LeNet-5 variant: fractal C1/S2, then a single 14x14
    softmax convolution replacing the remaining layers.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    x = vlayers_conv.VInputConv((5,5),
                                num_filters=6,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2,2),
                                  kernel_type="pooling",
                                  strides=(2,2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # S2
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    # C5
    x = layers.Conv2D(output_dims[0], 14,
                      activation='softmax',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal4(dataset_shapes, shared_inner_nets, optimizer=None):
    """LeNet-5 variant like fractal1 but with a single-filter fractal C1.

    optimizer defaults to a fresh SGD per call (fixes the shared stateful
    default instance). Returns a compiled keras.Model.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]  # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = vlayers_conv.VInputConv((5,5),
                                num_filters=1,
                                kernel_type="convolution",
                                strides=(1,1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2,2),
                                  kernel_type="pooling",
                                  strides=(2,2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation="relu",
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=(2,)
                                  )(x)
    # S2
    x = vlayers_conv.VOutputConv(layer_type="pooling", pooling="max")(x)
    # C3
    x = layers.Conv2D(16, 5,
                      activation='relu',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    # S4
    x = layers.MaxPool2D(pool_size=(2,2), strides=(2,2), padding='valid')(x)
    # C5
    x = layers.Conv2D(output_dims[0], 5,
                      activation='softmax',
                      strides=(1,1),
                      padding='valid',
                      kernel_initializer='random_normal',
                      bias_initializer='random_normal'
                      )(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal5(dataset_shapes, shared_inner_nets, hidden_layer_units=(2,), activation="relu", optimizer=None):
    """Build and compile a LeNet-5 variant built almost entirely from
    vectorized-convolution layers, with two fractal blocks (after C1 and
    after S2/C3).

    Arguments
    ---------
    dataset_shapes -- (input_shape, output_shape) pair; the input is
        zero-padded up to 32x32 when smaller.
    shared_inner_nets -- inner networks shared by both VConvFractal layers.
    hidden_layer_units -- hidden-layer sizes passed to each fractal block.
    activation -- activation used inside the fractal blocks.
    optimizer -- optional Keras optimizer. Defaults to a fresh SGD created
        per call. (A signature default of `keras.optimizers.SGD()` would be
        evaluated once at import time and share one stateful optimizer
        instance across every model built with defaults.)

    Returns
    -------
    A compiled ``keras.Model`` using MSE loss and an MSE metric.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]   # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input up to 32x32.
    # NOTE(review): (32 - h) // 2 under-pads by one pixel when the size
    # deficit is odd — confirm inputs are even-sized.
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = vlayers_conv.VInputConv((5, 5),
                                num_filters=6,
                                kernel_type="convolution",
                                strides=(1, 1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2, 2),
                                  kernel_type="pooling",
                                  strides=(2, 2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation=activation,
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=hidden_layer_units
                                  )(x)
    # S2
    x = vlayers_conv.VConv((5, 5),
                           layer_type="pooling", pooling="max",
                           num_filters=16,
                           kernel_type="convolution",
                           strides=(1, 1),
                           padding_type='valid',
                           weight_initializer="random_normal"
                           )(x)
    # C3
    x = vlayers_conv.VConvFractal((2, 2),
                                  kernel_type="pooling",
                                  strides=(2, 2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation=activation,
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=hidden_layer_units
                                  )(x)
    # S4
    x = vlayers_conv.VConv((5, 5),
                           layer_type="pooling", pooling="max",
                           num_filters=output_dims[0],
                           kernel_type="convolution",
                           strides=(1, 1),
                           padding_type='valid',
                           weight_initializer="random_normal"
                           )(x)
    # C5
    x = vlayers_conv.VOutputConv(layer_type="convolution", activation='softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
def get_le_net_5_fractal6(dataset_shapes, shared_inner_nets, hidden_layer_units=(2,), activation="relu", optimizer=None):
    """Build and compile a LeNet-5 variant like ``get_le_net_5_fractal5``,
    but with convolution (not pooling) kernels in the fractal blocks and
    activated VConv layers in place of the max-pooling stages.

    Arguments
    ---------
    dataset_shapes -- (input_shape, output_shape) pair; the input is
        zero-padded up to 32x32 when smaller.
    shared_inner_nets -- inner networks shared by both VConvFractal layers.
    hidden_layer_units -- hidden-layer sizes passed to each fractal block.
    activation -- activation used in the fractal and VConv layers.
    optimizer -- optional Keras optimizer. Defaults to a fresh SGD created
        per call. (A signature default of `keras.optimizers.SGD()` would be
        evaluated once at import time and share one stateful optimizer
        instance across every model built with defaults.)

    Returns
    -------
    A compiled ``keras.Model`` using MSE loss and an MSE metric.
    """
    if optimizer is None:
        optimizer = keras.optimizers.SGD()
    input_dims = dataset_shapes[0]   # Initially 32x32x1
    output_dims = dataset_shapes[1]  # Initially 10
    inputs = keras.Input(shape=input_dims)
    x = inputs
    # Pad input up to 32x32.
    # NOTE(review): (32 - h) // 2 under-pads by one pixel when the size
    # deficit is odd — confirm inputs are even-sized.
    if input_dims[0] < 32:
        padding = (32 - input_dims[0]) // 2
        x = layers.ZeroPadding2D(padding=padding)(x)
    # C1
    x = vlayers_conv.VInputConv((5, 5),
                                num_filters=6,
                                kernel_type="convolution",
                                strides=(1, 1),
                                padding_type='valid'
                                )(x)
    x = vlayers_conv.VConvFractal((2, 2),
                                  kernel_type="convolution",
                                  num_filters=6,
                                  strides=(2, 2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation=activation,
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=hidden_layer_units
                                  )(x)
    # S2
    x = vlayers_conv.VConv((5, 5),
                           layer_type="convolution", activation=activation,
                           num_filters=16,
                           kernel_type="convolution",
                           strides=(1, 1),
                           padding_type='valid',
                           weight_initializer="random_normal"
                           )(x)
    # C3
    x = vlayers_conv.VConvFractal((2, 2),
                                  kernel_type="convolution",
                                  num_filters=16,
                                  strides=(2, 2),
                                  padding_type='valid',
                                  layer_type="convolution",
                                  weight_initializer="random_normal",
                                  activation=activation,
                                  depth=1,
                                  shared_inner_nets=shared_inner_nets,
                                  hidden_layer_units=hidden_layer_units
                                  )(x)
    # S4
    x = vlayers_conv.VConv((5, 5),
                           layer_type="convolution", activation=activation,
                           num_filters=output_dims[0],
                           kernel_type="convolution",
                           strides=(1, 1),
                           padding_type='valid',
                           weight_initializer="random_normal"
                           )(x)
    # C5
    x = vlayers_conv.VOutputConv(layer_type="convolution", activation='softmax')(x)
    outputs = layers.Flatten()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanSquaredError()]
    )
    return model
| 33.17033
| 195
| 0.632533
| 3,558
| 30,185
| 5.144463
| 0.050871
| 0.026388
| 0.085446
| 0.039336
| 0.932583
| 0.924607
| 0.908326
| 0.907179
| 0.901005
| 0.884506
| 0
| 0.027237
| 0.243432
| 30,185
| 910
| 196
| 33.17033
| 0.774226
| 0.068544
| 0
| 0.89931
| 0
| 0
| 0.069237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024828
| false
| 0
| 0.006897
| 0
| 0.056552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b5c2dd193bd1480b5f5b6ce263a09884f843d48
| 19,417
|
py
|
Python
|
sdk/python/pulumi_aws/devicefarm/test_grid_project.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/devicefarm/test_grid_project.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/devicefarm/test_grid_project.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TestGridProjectArgs', 'TestGridProject']
@pulumi.input_type
class TestGridProjectArgs:
    # Generated Pulumi input-argument class. The @pulumi.input_type
    # decorator introspects the @property definitions below, and values
    # are stored/retrieved via pulumi.set/pulumi.get rather than plain
    # attributes — do not restructure by hand.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vpc_config: Optional[pulumi.Input['TestGridProjectVpcConfigArgs']] = None):
        """
        The set of arguments for constructing a TestGridProject resource.
        :param pulumi.Input[str] description: Human-readable description of the project.
        :param pulumi.Input[str] name: The name of the Selenium testing project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input['TestGridProjectVpcConfigArgs'] vpc_config: The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        # Only set properties that were actually provided, so unset
        # arguments stay absent rather than becoming explicit None values.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if vpc_config is not None:
            pulumi.set(__self__, "vpc_config", vpc_config)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description of the project.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Selenium testing project.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    # "tagsAll" is the wire name used by the provider for this property.
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)

    @property
    @pulumi.getter(name="vpcConfig")
    def vpc_config(self) -> Optional[pulumi.Input['TestGridProjectVpcConfigArgs']]:
        """
        The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        return pulumi.get(self, "vpc_config")

    @vpc_config.setter
    def vpc_config(self, value: Optional[pulumi.Input['TestGridProjectVpcConfigArgs']]):
        pulumi.set(self, "vpc_config", value)
@pulumi.input_type
class _TestGridProjectState:
    # Generated Pulumi state class: identical to TestGridProjectArgs plus
    # the output-only `arn` property. Used only for get()/import lookups.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vpc_config: Optional[pulumi.Input['TestGridProjectVpcConfigArgs']] = None):
        """
        Input properties used for looking up and filtering TestGridProject resources.
        :param pulumi.Input[str] arn: The Amazon Resource Name of this Test Grid Project.
        :param pulumi.Input[str] description: Human-readable description of the project.
        :param pulumi.Input[str] name: The name of the Selenium testing project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input['TestGridProjectVpcConfigArgs'] vpc_config: The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        # Only set properties that were actually provided.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if vpc_config is not None:
            pulumi.set(__self__, "vpc_config", vpc_config)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name of this Test Grid Project.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description of the project.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Selenium testing project.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    # "tagsAll" is the wire name used by the provider for this property.
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)

    @property
    @pulumi.getter(name="vpcConfig")
    def vpc_config(self) -> Optional[pulumi.Input['TestGridProjectVpcConfigArgs']]:
        """
        The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        return pulumi.get(self, "vpc_config")

    @vpc_config.setter
    def vpc_config(self, value: Optional[pulumi.Input['TestGridProjectVpcConfigArgs']]):
        pulumi.set(self, "vpc_config", value)
class TestGridProject(pulumi.CustomResource):
    # Generated Pulumi resource class. The two @overload __init__ stubs
    # below only document the two supported calling conventions
    # (kwargs-style vs. an args object); the real __init__ dispatches to
    # _internal_init after _utilities resolves which convention was used.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vpc_config: Optional[pulumi.Input[pulumi.InputType['TestGridProjectVpcConfigArgs']]] = None,
                 __props__=None):
        """
        Provides a resource to manage AWS Device Farm Test Grid Projects.
        > **NOTE:** AWS currently has limited regional support for Device Farm (e.g., `us-west-2`). See [AWS Device Farm endpoints and quotas](https://docs.aws.amazon.com/general/latest/gr/devicefarm.html) for information on supported regions.
        ## Import
        DeviceFarm Test Grid Projects can be imported by their arn
        ```sh
        $ pulumi import aws:devicefarm/testGridProject:TestGridProject example arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Human-readable description of the project.
        :param pulumi.Input[str] name: The name of the Selenium testing project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input[pulumi.InputType['TestGridProjectVpcConfigArgs']] vpc_config: The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[TestGridProjectArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a resource to manage AWS Device Farm Test Grid Projects.
        > **NOTE:** AWS currently has limited regional support for Device Farm (e.g., `us-west-2`). See [AWS Device Farm endpoints and quotas](https://docs.aws.amazon.com/general/latest/gr/devicefarm.html) for information on supported regions.
        ## Import
        DeviceFarm Test Grid Projects can be imported by their arn
        ```sh
        $ pulumi import aws:devicefarm/testGridProject:TestGridProject example arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1
        ```
        :param str resource_name: The name of the resource.
        :param TestGridProjectArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which overload the caller used, then forward to
        # _internal_init either way.
        resource_args, opts = _utilities.get_resource_args_opts(TestGridProjectArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                      resource_name: str,
                      opts: Optional[pulumi.ResourceOptions] = None,
                      description: Optional[pulumi.Input[str]] = None,
                      name: Optional[pulumi.Input[str]] = None,
                      tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                      tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                      vpc_config: Optional[pulumi.Input[pulumi.InputType['TestGridProjectVpcConfigArgs']]] = None,
                      __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may only be pre-supplied
            # when looking up an existing resource by id (see get()).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TestGridProjectArgs.__new__(TestGridProjectArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tags_all"] = tags_all
            __props__.__dict__["vpc_config"] = vpc_config
            # arn is output-only; the provider fills it in.
            __props__.__dict__["arn"] = None
        super(TestGridProject, __self__).__init__(
            'aws:devicefarm/testGridProject:TestGridProject',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            vpc_config: Optional[pulumi.Input[pulumi.InputType['TestGridProjectVpcConfigArgs']]] = None) -> 'TestGridProject':
        """
        Get an existing TestGridProject resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name of this Test Grid Project.
        :param pulumi.Input[str] description: Human-readable description of the project.
        :param pulumi.Input[str] name: The name of the Selenium testing project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input[pulumi.InputType['TestGridProjectVpcConfigArgs']] vpc_config: The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _TestGridProjectState.__new__(_TestGridProjectState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        __props__.__dict__["vpc_config"] = vpc_config
        return TestGridProject(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name of this Test Grid Project.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Human-readable description of the project.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the Selenium testing project.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @property
    @pulumi.getter(name="vpcConfig")
    def vpc_config(self) -> pulumi.Output[Optional['outputs.TestGridProjectVpcConfig']]:
        """
        The VPC security groups and subnets that are attached to a project. See VPC Config below.
        """
        return pulumi.get(self, "vpc_config")
| 50.303109
| 348
| 0.669877
| 2,368
| 19,417
| 5.334037
| 0.089105
| 0.087958
| 0.066503
| 0.064286
| 0.853614
| 0.840314
| 0.823054
| 0.815217
| 0.811179
| 0.800253
| 0
| 0.004165
| 0.220992
| 19,417
| 385
| 349
| 50.433766
| 0.830887
| 0.416903
| 0
| 0.746606
| 1
| 0
| 0.08932
| 0.031525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158371
| false
| 0.004525
| 0.031674
| 0
| 0.285068
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5b7eef74c9e604a935d921b723e9dd99f1c3dcce
| 2,654
|
py
|
Python
|
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from .... pyaz_utils import _call_az
def add(data, gateway_name, name, resource_group):
    '''
    Add trusted client certificate of the application gateway.
    Required Parameters:
    - data -- Certificate public data.
    - gateway_name -- Name of the application gateway.
    - name -- Name of the trusted client certificate that is unique within an Application Gateway
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # locals() forwards every argument by its parameter name, so the
    # parameter names above must stay in sync with the az CLI options.
    return _call_az("az network application-gateway client-cert add", locals())
def remove(gateway_name, name, resource_group):
    '''
    Remove an existing trusted client certificate of the application gateway.
    Required Parameters:
    - gateway_name -- Name of the application gateway.
    - name -- Name of the trusted client certificate that is unique within an Application Gateway
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # locals() forwards every argument by its parameter name, so the
    # parameter names above must stay in sync with the az CLI options.
    return _call_az("az network application-gateway client-cert remove", locals())
# NOTE: intentionally shadows the builtin `list` — the function name must
# mirror the az CLI subcommand it wraps.
def list(gateway_name, resource_group):
    '''
    List the existing trusted client certificate of the application gateway.
    Required Parameters:
    - gateway_name -- Name of the application gateway.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # locals() forwards every argument by its parameter name, so the
    # parameter names above must stay in sync with the az CLI options.
    return _call_az("az network application-gateway client-cert list", locals())
def show(gateway_name, name, resource_group):
    '''
    Show an existing trusted client certificate of the application gateway.
    Required Parameters:
    - gateway_name -- Name of the application gateway.
    - name -- Name of the trusted client certificate that is unique within an Application Gateway
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # locals() forwards every argument by its parameter name, so the
    # parameter names above must stay in sync with the az CLI options.
    return _call_az("az network application-gateway client-cert show", locals())
def update(data, gateway_name, name, resource_group):
    '''
    Update trusted client certificate of the application gateway.
    Required Parameters:
    - data -- Certificate public data.
    - gateway_name -- Name of the application gateway.
    - name -- Name of the trusted client certificate that is unique within an Application Gateway
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # locals() forwards every argument by its parameter name, so the
    # parameter names above must stay in sync with the az CLI options.
    return _call_az("az network application-gateway client-cert update", locals())
| 42.126984
| 128
| 0.727958
| 342
| 2,654
| 5.552632
| 0.122807
| 0.180095
| 0.102686
| 0.121116
| 0.912586
| 0.883096
| 0.849394
| 0.849394
| 0.849394
| 0.849394
| 0
| 0
| 0.192916
| 2,654
| 62
| 129
| 42.806452
| 0.886555
| 0.668802
| 0
| 0
| 0
| 0
| 0.331476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b93dd0a7f2266c3611c141873d0d7500f35fb36
| 3,252
|
py
|
Python
|
tests/slo/backup_quality/predicate/test_sli_table_newer_modification_predicate.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 41
|
2018-05-08T11:54:37.000Z
|
2022-02-09T21:19:17.000Z
|
tests/slo/backup_quality/predicate/test_sli_table_newer_modification_predicate.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 139
|
2018-06-07T13:45:21.000Z
|
2021-04-30T20:44:06.000Z
|
tests/slo/backup_quality/predicate/test_sli_table_newer_modification_predicate.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 5
|
2019-09-11T12:28:24.000Z
|
2022-02-04T21:38:29.000Z
|
import unittest
from mock import Mock, patch
from src.commons.big_query.big_query import BigQuery
from src.slo.backup_quality.predicate.sli_table_newer_modification_predicate import \
SLITableNewerModificationPredicate
class TestSLITableNewerModificationPredicate(unittest.TestCase):
    """Checks that the predicate flags a table as modified only when the
    mocked BigQuery lastModifiedTime is strictly newer than the census
    snapshot value carried in the SLI table row."""

    @staticmethod
    def _census_sli_table():
        # One shared fixture instead of three identical inline literals.
        # The census lastModifiedTime (1.518522714837e9 s == 1518522714837 ms)
        # is the reference each mocked BigQuery response is compared against.
        return {
            "snapshotTime": None,
            "projectId": 'p',
            "datasetId": 'd',
            "tableId": 'd',
            "partitionId": None,
            "creationTime": None,
            "lastModifiedTime": float('1.518522714837E9'),
            "backupCreated": None,
            "backupLastModified": None,
            "xDays": 4
        }

    @patch('src.commons.big_query.big_query.BigQuery.__init__',
           Mock(return_value=None))
    @patch('src.commons.big_query.big_query.BigQuery.get_table',
           Mock(return_value={'projectId': 'p', 'lastModifiedTime': '1618522714837',
                              'schema': {'fields': []}}))
    def test_should_return_true_if_get_table_has_newer_modification_time_than_census(self):
        # given: BigQuery reports 1618522714837 ms — newer than the census value
        sli_table = self._census_sli_table()
        # when
        is_modified = SLITableNewerModificationPredicate(BigQuery()).is_modified_since_last_census_snapshot(sli_table)
        # then
        self.assertTrue(is_modified)

    @patch('src.commons.big_query.big_query.BigQuery.__init__',
           Mock(return_value=None))
    @patch('src.commons.big_query.big_query.BigQuery.get_table',
           Mock(return_value={'projectId': 'p', 'lastModifiedTime': '1518522714837',
                              'schema': {'fields': []}}))
    def test_should_return_false_if_get_table_has_the_same_modification_time_than_census(self):
        # given: BigQuery reports exactly the census value
        sli_table = self._census_sli_table()
        # when
        is_modified = SLITableNewerModificationPredicate(BigQuery()).is_modified_since_last_census_snapshot(sli_table)
        # then
        self.assertFalse(is_modified)

    @patch('src.commons.big_query.big_query.BigQuery.__init__',
           Mock(return_value=None))
    @patch('src.commons.big_query.big_query.BigQuery.get_table',
           Mock(return_value={'projectId': 'p', 'lastModifiedTime': '1418522714837',
                              'schema': {'fields': []}}))
    def test_should_return_false_if_get_table_has_the_older_modification_time_than_census(self):
        # given: BigQuery reports 1418522714837 ms — older than the census value
        sli_table = self._census_sli_table()
        # when
        is_modified = SLITableNewerModificationPredicate(BigQuery()).is_modified_since_last_census_snapshot(sli_table)
        # then
        self.assertFalse(is_modified)
| 36.539326
| 118
| 0.608856
| 305
| 3,252
| 6.147541
| 0.236066
| 0.059733
| 0.048533
| 0.0672
| 0.84
| 0.84
| 0.8096
| 0.8096
| 0.8096
| 0.8096
| 0
| 0.035608
| 0.2746
| 3,252
| 88
| 119
| 36.954545
| 0.75922
| 0.014453
| 0
| 0.757576
| 0
| 0
| 0.26385
| 0.092958
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.045455
| false
| 0
| 0.060606
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b9f88ef8d854e5ded4dacd5315333903a5bfde7
| 34
|
py
|
Python
|
tests/test_toy_projpy.py
|
anasm-17/toy_projpy
|
c751ddf0a52f98437fffc3eec6c691df67f3406f
|
[
"MIT"
] | null | null | null |
tests/test_toy_projpy.py
|
anasm-17/toy_projpy
|
c751ddf0a52f98437fffc3eec6c691df67f3406f
|
[
"MIT"
] | null | null | null |
tests/test_toy_projpy.py
|
anasm-17/toy_projpy
|
c751ddf0a52f98437fffc3eec6c691df67f3406f
|
[
"MIT"
] | null | null | null |
from toy_projpy import toy_projpy
| 17
| 33
| 0.882353
| 6
| 34
| 4.666667
| 0.666667
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5beb38a7a1d81e467ec5c3095ad8036b4f41a819
| 18,337
|
py
|
Python
|
directdm/num/single_nucleon_form_factors.py
|
DirectDM/directdm-py
|
9e940703bc4e5b2266ce2c93c27abee755c0cbaf
|
[
"MIT"
] | 5
|
2017-09-09T16:22:00.000Z
|
2021-11-17T07:31:11.000Z
|
directdm/num/single_nucleon_form_factors.py
|
DirectDM/directdm-py
|
9e940703bc4e5b2266ce2c93c27abee755c0cbaf
|
[
"MIT"
] | 2
|
2018-04-17T16:43:27.000Z
|
2018-04-19T12:34:54.000Z
|
directdm/num/single_nucleon_form_factors.py
|
DirectDM/directdm-py
|
9e940703bc4e5b2266ce2c93c27abee755c0cbaf
|
[
"MIT"
] | 2
|
2018-05-10T17:39:57.000Z
|
2018-09-19T16:40:07.000Z
|
#!/usr/bin/env python3
class F1:
    """The nuclear Dirac form factor F1.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # F1(0) per nucleon and quark flavor; lookups outside this table
    # yield None, matching the original if-chain fall-through.
    _ZERO_MOM = {
        'p': {'u': 2, 'd': 1, 's': 0},
        'n': {'u': 1, 'd': 2, 's': 0},
    }

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        return self._ZERO_MOM.get(self.nucleon, {}).get(self.quark)

    def first_deriv_zero_mom(self):
        """Return the value of the first derivative of the form factor
        w.r.t. q^2 at zero momentum transfer (only strange quark)."""
        if self.quark == 's' and self.nucleon in ('p', 'n'):
            return self.ip['rs2'] / 6
        return None
class F2(object):
    """The nuclear Pauli form factor F2.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
        (ap, an -- anomalous magnetic moments; F2sp -- strange contribution)
    """

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        # Proton and neutron values are related by swapping ap <-> an:
        # `same` is the moment of the nucleon itself, `other` its partner's.
        if self.nucleon == 'p':
            same, other = self.ip['ap'], self.ip['an']
        elif self.nucleon == 'n':
            same, other = self.ip['an'], self.ip['ap']
        else:
            return None
        if self.quark == 'u':
            return 2 * same + other + self.ip['F2sp']
        if self.quark == 'd':
            return 2 * other + same + self.ip['F2sp']
        if self.quark == 's':
            return self.ip['F2sp']
        return None
class FA(object):
    """The nuclear form factor FA at zero momentum transfer.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # Input-dictionary key per (nucleon, quark); the strange-quark entry
    # 'Deltas' is shared by proton and neutron.
    _KEYS = {
        ('p', 'u'): 'Deltaup',
        ('p', 'd'): 'Deltadp',
        ('p', 's'): 'Deltas',
        ('n', 'u'): 'Deltaun',
        ('n', 'd'): 'Deltadn',
        ('n', 's'): 'Deltas',
    }

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        key = self._KEYS.get((self.nucleon, self.quark))
        if key is not None:
            return self.ip[key]
class FPprimed(object):
    """The nuclear form factor FPprimed.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # Sign of the pion-pole coefficient per (nucleon, quark); the strange
    # quark does not receive a pion-pole contribution.
    _PION_SIGN = {('p', 'u'): 1, ('p', 'd'): -1, ('n', 'u'): -1, ('n', 'd'): 1}

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_pion_pole(self):
        """Return the coefficient of the pion pole, 1 / (q^2 + mpi0^2),
        in terms of the spatial momentum q. Also caches self.mN."""
        self.mN = (self.ip['mproton'] + self.ip['mneutron'])/2
        sign = self._PION_SIGN.get((self.nucleon, self.quark))
        if sign is not None:
            return sign * self.mN**2 * 2 * self.ip['gA']
        if self.nucleon in ('p', 'n') and self.quark == 's':
            return 0

    def value_eta_pole(self):
        """Return the coefficient of the eta pole, 1 / (q^2 + meta^2),
        in terms of the spatial momentum q. Also caches self.mN."""
        self.mN = (self.ip['mproton'] + self.ip['mneutron'])/2
        if self.nucleon in ('p', 'n') and self.quark in ('u', 'd', 's'):
            # NOTE(review): as in the original, the proton Delta values are
            # used for both nucleons here -- confirm this is intended.
            eta = self.ip['Deltaup'] + self.ip['Deltadp'] - 2*self.ip['Deltas']
            if self.quark == 's':
                return - self.mN**2 * 4 * eta / 3
            return self.mN**2 * 2 * eta / 3
class FS(object):
    """The nuclear form factor FS.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # Input-dictionary key per (nucleon, quark); 'sigmas' is shared.
    _KEYS = {
        ('p', 'u'): 'sigmaup',
        ('p', 'd'): 'sigmadp',
        ('p', 's'): 'sigmas',
        ('n', 'u'): 'sigmaun',
        ('n', 'd'): 'sigmadn',
        ('n', 's'): 'sigmas',
    }

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        key = self._KEYS.get((self.nucleon, self.quark))
        if key is not None:
            return self.ip[key]
class FP(object):
    """The nuclear form factor FP.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_pion_pole(self):
        """Return the coefficient of the pion pole, 1 / (q^2 + mpi0^2),
        in terms of the spatial momentum q. Also caches self.mN."""
        self.mN = (self.ip['mproton'] + self.ip['mneutron'])/2
        if self.nucleon not in ('p', 'n'):
            return None
        if self.quark == 's':
            return 0
        if self.quark == 'u':
            coeff = self.mN**2 * self.ip['gA'] * self.ip['B0mu'] / self.mN
            # u-quark coefficient flips sign between proton and neutron.
            return coeff if self.nucleon == 'p' else -coeff
        if self.quark == 'd':
            coeff = self.mN**2 * self.ip['gA'] * self.ip['B0md'] / self.mN
            # d-quark coefficient has the opposite sign pattern.
            return -coeff if self.nucleon == 'p' else coeff

    def value_eta_pole(self):
        """Return the coefficient of the eta pole, 1 / (q^2 + meta^2),
        in terms of the spatial momentum q. Also caches self.mN."""
        self.mN = (self.ip['mproton'] + self.ip['mneutron'])/2
        if self.nucleon in ('p', 'n') and self.quark in ('u', 'd', 's'):
            # NOTE(review): as in the original, the proton Delta values are
            # used for both nucleons here -- confirm this is intended.
            eta = self.ip['Deltaup'] + self.ip['Deltadp'] - 2*self.ip['Deltas']
            if self.quark == 'u':
                return self.mN**2 * eta/3/self.mN * self.ip['B0mu']
            if self.quark == 'd':
                return self.mN**2 * eta/3/self.mN * self.ip['B0md']
            return - 2 * self.mN**2 * eta/3/self.mN * self.ip['B0ms']
class FT0(object):
    """The nuclear form factor FT0.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # (mass key, tensor-charge key) per (nucleon, quark). For the neutron
    # the u/d tensor charges are swapped while the quark mass factor still
    # follows the quark flavor, as in the original branch structure.
    _KEYS = {
        ('p', 'u'): ('mu_at_2GeV', 'gTu'),
        ('p', 'd'): ('md_at_2GeV', 'gTd'),
        ('p', 's'): ('ms_at_2GeV', 'gTs'),
        ('n', 'u'): ('mu_at_2GeV', 'gTd'),
        ('n', 'd'): ('md_at_2GeV', 'gTu'),
        ('n', 's'): ('ms_at_2GeV', 'gTs'),
    }

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        pair = self._KEYS.get((self.nucleon, self.quark))
        if pair is None:
            return None
        mass_key, charge_key = pair
        return self.ip[mass_key] * self.ip[charge_key]
class FT1(object):
    """The nuclear form factor FT1.

    Arguments
    ---------
    quark = 'u', 'd', 's' -- the quark flavor (up, down, strange)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # (mass key, BT10 key) per (nucleon, quark); 'BT10s' is shared.
    _KEYS = {
        ('p', 'u'): ('mu_at_2GeV', 'BT10up'),
        ('p', 'd'): ('md_at_2GeV', 'BT10dp'),
        ('p', 's'): ('ms_at_2GeV', 'BT10s'),
        ('n', 'u'): ('mu_at_2GeV', 'BT10un'),
        ('n', 'd'): ('md_at_2GeV', 'BT10dn'),
        ('n', 's'): ('ms_at_2GeV', 'BT10s'),
    }

    def __init__(self, quark, nucleon, input_dict):
        self.quark = quark
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer
        (note the overall minus sign common to all flavors)."""
        pair = self._KEYS.get((self.nucleon, self.quark))
        if pair is None:
            return None
        mass_key, bt_key = pair
        return - self.ip[mass_key] * self.ip[bt_key]
class FG(object):
    """The nuclear form factor FG.

    Arguments
    ---------
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    def __init__(self, nucleon, input_dict):
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer.
        The value is identical for proton and neutron."""
        if self.nucleon in ('p', 'n'):
            return -2*self.ip['mG']/27
class FGtilde(object):
    """The nuclear form factor FGtilde.

    Arguments
    ---------
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    def __init__(self, nucleon, input_dict):
        self.nucleon = nucleon
        self.ip = input_dict

    def _update_masses(self):
        """Cache the reduced quark mass (mtilde) and the average nucleon
        mass (mN) on the instance, exactly as each public method did."""
        self.mtilde = 1/(1/self.ip['mu_at_2GeV'] + 1/self.ip['md_at_2GeV'] + 1/self.ip['ms_at_2GeV'])
        self.mN = (self.ip['mproton'] + self.ip['mneutron'])/2

    def _deltas(self):
        """Return (Delta_u, Delta_d) for the configured nucleon, or None."""
        if self.nucleon == 'p':
            return self.ip['Deltaup'], self.ip['Deltadp']
        if self.nucleon == 'n':
            return self.ip['Deltaun'], self.ip['Deltadn']
        return None

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer."""
        self._update_masses()
        deltas = self._deltas()
        if deltas is None:
            return None
        du, dd = deltas
        return -self.mN * self.mtilde * (du/self.ip['mu_at_2GeV']
                                         + dd/self.ip['md_at_2GeV']
                                         + self.ip['Deltas']/self.ip['ms_at_2GeV'])

    def value_pion_pole(self):
        """Return the coefficient of the pion pole, q^2 / (q^2 + mpi0^2)."""
        self._update_masses()
        if self.nucleon not in ('p', 'n'):
            return None
        coeff = self.mN * self.mtilde * self.ip['gA'] * (1/self.ip['mu_at_2GeV'] - 1/self.ip['md_at_2GeV']) / 2
        # Proton and neutron coefficients differ only by sign.
        return coeff if self.nucleon == 'p' else -coeff

    def value_eta_pole(self):
        """Return the coefficient of the eta pole, q^2 / (q^2 + meta^2)."""
        self._update_masses()
        deltas = self._deltas()
        if deltas is None:
            return None
        du, dd = deltas
        return self.mN * self.mtilde * (du + dd - 2*self.ip['Deltas'])\
            * (1/self.ip['mu_at_2GeV'] + 1/self.ip['md_at_2GeV'] - 2/self.ip['ms_at_2GeV']) / 6
class FTwist2:
    """The twist-two nuclear form factors.

    Arguments
    ---------
    flavor = 'u', 'd', 's', 'g' -- the "quark" flavor (up, down, strange,
        or the gluon contribution)
    nucleon = 'p', 'n' -- the nucleon (proton or neutron)
    input_dict -- a dictionary of hadronic input parameters
    """

    # Second-moment key per (nucleon -> flavor); the gluon key is shared.
    _KEYS = {
        'p': {'u': 'f2up', 'd': 'f2dp', 's': 'f2sp', 'g': 'f2g'},
        'n': {'u': 'f2un', 'd': 'f2dn', 's': 'f2sn', 'g': 'f2g'},
    }

    def __init__(self, flavor, nucleon, input_dict):
        self.flavor = flavor
        self.nucleon = nucleon
        self.ip = input_dict

    def value_zero_mom(self):
        """Return the value of the form factor at zero momentum transfer.
        Also caches self.mp and self.mn, as the original did."""
        self.mp = self.ip['mproton']
        self.mn = self.ip['mneutron']
        key = self._KEYS.get(self.nucleon, {}).get(self.flavor)
        if key is None:
            return None
        mass = self.mp if self.nucleon == 'p' else self.mn
        return 3/4 * mass * self.ip[key]
| 35.536822
| 133
| 0.503572
| 2,368
| 18,337
| 3.813767
| 0.056588
| 0.120917
| 0.075518
| 0.029233
| 0.944524
| 0.903333
| 0.884952
| 0.858598
| 0.818514
| 0.809434
| 0
| 0.019987
| 0.345149
| 18,337
| 515
| 134
| 35.605825
| 0.732095
| 0.273
| 0
| 0.805243
| 0
| 0
| 0.095847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101124
| false
| 0
| 0
| 0
| 0.434457
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
75129e33dd7897210069bec3c1355b219353861a
| 6,749
|
py
|
Python
|
hackersinresidence/webapp/migrations/0007_auto_20171221_0729.py
|
noisebridge/hackersinresidence
|
2a1ae32ef6f49614b295a32933974e34266c5411
|
[
"MIT"
] | 4
|
2018-03-12T22:46:13.000Z
|
2019-07-20T01:58:37.000Z
|
hackersinresidence/webapp/migrations/0007_auto_20171221_0729.py
|
noisebridge/hackersinresidence
|
2a1ae32ef6f49614b295a32933974e34266c5411
|
[
"MIT"
] | 10
|
2018-02-18T09:07:15.000Z
|
2018-02-25T22:18:37.000Z
|
hackersinresidence/webapp/migrations/0007_auto_20171221_0729.py
|
noisebridge/hackersinresidence
|
2a1ae32ef6f49614b295a32933974e34266c5411
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-12-21 07:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add offer_* / require_* fields to the ``Opportunity`` model.

    Auto-generated by ``makemigrations`` (Django 1.11.4). Each offer and
    requirement follows a checkbox + free-text "detail" pattern.
    """

    dependencies = [
        ('webapp', '0006_auto_20171205_1929'),
    ]
    operations = [
        # --- "offer" fields: what the host provides to the resident ---
        migrations.AddField(
            model_name='opportunity',
            name='offer_additional_detail',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_food_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_food_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_housing_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_housing_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_stipend_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_stipend_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_studio_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_studio_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_tools_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_tools_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_travel_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='offer_travel_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        # --- "require" fields: what the host expects from the resident ---
        migrations.AddField(
            model_name='opportunity',
            name='require_class_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_class_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_date_detail',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_end_date',
            field=models.DateField(blank=True, help_text='Latest date the residency can end', null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_hackathon_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_hackathon_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_language',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        # NOTE(review): help_text below says "Minimum" on the *maximum*-stay
        # field -- looks like a copy-paste from require_minimum_stay. Fixing
        # it needs a follow-up migration; do not edit this file in place.
        migrations.AddField(
            model_name='opportunity',
            name='require_maximum_stay',
            field=models.CharField(blank=True, help_text='Minimum required length of stay', max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_mentoring_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_mentoring_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_minimum_stay',
            field=models.CharField(blank=True, help_text='Minimum required length of stay', max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_other_requirements',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_presentation_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_presentation_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_start_date',
            field=models.DateField(blank=True, help_text='Earliest date the residency can start', null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_talk_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_talk_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_workshop_checkbox',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='opportunity',
            name='require_workshop_detail',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        # --- loosen existing fields to allow blank/null values ---
        migrations.AlterField(
            model_name='opportunity',
            name='description',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='opportunity',
            name='expiration_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
| 36.284946
| 119
| 0.585568
| 628
| 6,749
| 6.093949
| 0.136943
| 0.079958
| 0.177685
| 0.213222
| 0.910896
| 0.910896
| 0.902273
| 0.889992
| 0.795662
| 0.764568
| 0
| 0.016635
| 0.30523
| 6,749
| 185
| 120
| 36.481081
| 0.799531
| 0.010076
| 0
| 0.747191
| 1
| 0
| 0.186134
| 0.060647
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011236
| 0
| 0.02809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f32c8e95743fd38ffb623c448ca2952d8310f5c7
| 46
|
py
|
Python
|
NavigationSystem/__init__.py
|
CallumJHays/g26-egb320-2019
|
6dde6b5d2f72fac3928c5042a27dc50e978c3425
|
[
"MIT"
] | null | null | null |
NavigationSystem/__init__.py
|
CallumJHays/g26-egb320-2019
|
6dde6b5d2f72fac3928c5042a27dc50e978c3425
|
[
"MIT"
] | null | null | null |
NavigationSystem/__init__.py
|
CallumJHays/g26-egb320-2019
|
6dde6b5d2f72fac3928c5042a27dc50e978c3425
|
[
"MIT"
] | null | null | null |
from .NavigationSystem import NavigationSystem
| 46
| 46
| 0.913043
| 4
| 46
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f32ecbb312d7d9c0f8996936d71ce79cd9ef4ab1
| 119
|
py
|
Python
|
utils/__init__.py
|
Untesler/autotrading
|
4ea15dd89960ce14caa1e09119769a027730c02e
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
Untesler/autotrading
|
4ea15dd89960ce14caa1e09119769a027730c02e
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
Untesler/autotrading
|
4ea15dd89960ce14caa1e09119769a027730c02e
|
[
"MIT"
] | null | null | null |
from utils.predictor import *
from utils.smoother import *
from utils.preprocessstock import *
from utils.obv import *
| 23.8
| 35
| 0.798319
| 16
| 119
| 5.9375
| 0.4375
| 0.378947
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 4
| 36
| 29.75
| 0.92233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f341b8ac3d886894bdd3bb1a5297ed0905f3c132
| 132
|
py
|
Python
|
lib/sshfdpass/actions/tcp4.py
|
pasztor/sshfdpass
|
81d135021191a272eefb5a53610c1a78e3496ee5
|
[
"MIT"
] | 1
|
2020-02-27T12:36:19.000Z
|
2020-02-27T12:36:19.000Z
|
lib/sshfdpass/actions/tcp4.py
|
pasztor/sshfdpass
|
81d135021191a272eefb5a53610c1a78e3496ee5
|
[
"MIT"
] | null | null | null |
lib/sshfdpass/actions/tcp4.py
|
pasztor/sshfdpass
|
81d135021191a272eefb5a53610c1a78e3496ee5
|
[
"MIT"
] | null | null | null |
import sshfdpass.actions.tcp
class Action(sshfdpass.actions.tcp.Action):
    """TCP action specialized to IPv4: identical to the generic TCP action
    except that the default address-family order is pinned to '4'."""

    def _defaults(self):
        """Return the default option mapping for this action."""
        return {'aforder': '4'}
| 22
| 43
| 0.719697
| 17
| 132
| 5.529412
| 0.764706
| 0.340426
| 0.404255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009009
| 0.159091
| 132
| 5
| 44
| 26.4
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.5
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
f355b878c0f5146fbad39e4db768c31bb8a511db
| 180
|
py
|
Python
|
src/boost_histogram/utils.py
|
HDembinski/boost-histogram
|
6071588d8b58504938f72818d22ff3ce2a5b45dc
|
[
"BSD-3-Clause"
] | null | null | null |
src/boost_histogram/utils.py
|
HDembinski/boost-histogram
|
6071588d8b58504938f72818d22ff3ce2a5b45dc
|
[
"BSD-3-Clause"
] | null | null | null |
src/boost_histogram/utils.py
|
HDembinski/boost-histogram
|
6071588d8b58504938f72818d22ff3ce2a5b45dc
|
[
"BSD-3-Clause"
] | null | null | null |
# Public shim: re-export set_family from the private _internal package.
from __future__ import absolute_import, division, print_function
# Remove the __future__ names again so they do not leak into this module's
# namespace (e.g. via ``dir()`` or star-imports).
del absolute_import, division, print_function
# Explicit public API of this module.
__all__ = ("set_family",)
from ._internal.utils import set_family
| 22.5
| 64
| 0.822222
| 23
| 180
| 5.782609
| 0.565217
| 0.210526
| 0.330827
| 0.406015
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 180
| 7
| 65
| 25.714286
| 0.83125
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
f38a94fa36c647cb06a59b34e6eaada1e29550a3
| 6,960
|
py
|
Python
|
lib/SystemEventLogLib/eventing_service_events.py
|
mhocouchbase/testrunner
|
10faf6955a905dee9a254daf90352881d4687735
|
[
"Apache-2.0"
] | null | null | null |
lib/SystemEventLogLib/eventing_service_events.py
|
mhocouchbase/testrunner
|
10faf6955a905dee9a254daf90352881d4687735
|
[
"Apache-2.0"
] | null | null | null |
lib/SystemEventLogLib/eventing_service_events.py
|
mhocouchbase/testrunner
|
10faf6955a905dee9a254daf90352881d4687735
|
[
"Apache-2.0"
] | null | null | null |
from SystemEventLogLib.Events import Event
from constants.cb_constants.system_event_log import Eventing
class EventingServiceEvents(object):
    """Builders for the expected Eventing-service system-event payloads.

    Each public static method returns the event dictionary (keyed by the
    shared ``Event.Fields`` constants) expected in the system event log for
    one eventing-service action on the given node.
    """

    # Sentinel distinguishing "no appName attribute" from an explicit None.
    _NO_APP = object()

    @staticmethod
    def _event(event_id, description, node, appname=_NO_APP):
        """Build the event dict common to all eventing events.

        event_id    -- one of the ``Eventing.*`` event-id constants
        description -- the expected human-readable description string
        node        -- node name the event is expected from
        appname     -- if given, added under EXTRA_ATTRS as {"appName": ...}
        """
        event = {
            Event.Fields.EVENT_ID: event_id,
            Event.Fields.COMPONENT: Event.Component.EVENTING,
            Event.Fields.DESCRIPTION: description,
            Event.Fields.SEVERITY: Event.Severity.INFO,
            Event.Fields.SUB_COMPONENT: "eventing-producer",
            Event.Fields.NODE_NAME: node,
        }
        if appname is not EventingServiceEvents._NO_APP:
            event[Event.Fields.EXTRA_ATTRS] = {"appName": appname}
        return event

    @staticmethod
    def producer_startup(node):
        return EventingServiceEvents._event(
            Eventing.ProducerStartup, "eventing-producer process startup", node)

    @staticmethod
    def consumer_startup(node):
        # NOTE(review): the description below matches the original source but
        # reads like a copy-paste from producer_startup -- confirm the server
        # really emits "eventing-producer process startup" here.
        return EventingServiceEvents._event(
            Eventing.ConsumerStartup, "eventing-producer process startup", node)

    @staticmethod
    def consumer_crash(node):
        # NOTE(review): same description as producer_startup in the original
        # source -- confirm against actual server output.
        return EventingServiceEvents._event(
            Eventing.ConsumerCrash, "eventing-producer process startup", node)

    @staticmethod
    def start_tracing(node):
        return EventingServiceEvents._event(
            Eventing.StartTracing, "Tracing started", node)

    @staticmethod
    def stop_tracing(node):
        return EventingServiceEvents._event(
            Eventing.StopTracing, "Tracing stopped", node)

    @staticmethod
    def start_debugger(node, appname):
        return EventingServiceEvents._event(
            Eventing.StartDebugger, "Debugger started", node, appname)

    @staticmethod
    def stop_debugger(node, appname):
        return EventingServiceEvents._event(
            Eventing.StopDebugger, "Debugger stopped", node, appname)

    @staticmethod
    def create_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.CreateFunction, "Create Function", node, appname)

    @staticmethod
    def delete_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.DeleteFunction, "Delete Function", node, appname)

    @staticmethod
    def import_functions(node):
        return EventingServiceEvents._event(
            Eventing.ImportFunctions, "Import Functions", node)

    @staticmethod
    def export_functions(node):
        return EventingServiceEvents._event(
            Eventing.ExportFunctions, "Export Functions", node)

    @staticmethod
    def deploy_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.DeployFunction, "Function deployed", node, appname)

    @staticmethod
    def undeploy_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.UndeployFunction, "Function undeployed", node, appname)

    @staticmethod
    def resume_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.ResumeFunction, "Function resumed", node, appname)

    @staticmethod
    def pause_function(node, appname):
        return EventingServiceEvents._event(
            Eventing.PauseFunction, "Function paused", node, appname)
| 39.322034
| 74
| 0.632902
| 679
| 6,960
| 6.382916
| 0.098675
| 0.248731
| 0.058837
| 0.076142
| 0.892017
| 0.892017
| 0.892017
| 0.88371
| 0.823258
| 0.670051
| 0
| 0
| 0.270259
| 6,960
| 177
| 75
| 39.322034
| 0.853318
| 0
| 0
| 0.627329
| 0
| 0
| 0.086338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093168
| false
| 0
| 0.031056
| 0.093168
| 0.223602
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
caf10f8f4f26e98a338c344dd037e7129872b8e5
| 269
|
py
|
Python
|
public/course/NLP/Hockey/sample.py
|
Caterpie-poke/playground
|
da78cf88154587c555d69a131a418fb999c14409
|
[
"MIT"
] | null | null | null |
public/course/NLP/Hockey/sample.py
|
Caterpie-poke/playground
|
da78cf88154587c555d69a131a418fb999c14409
|
[
"MIT"
] | 3
|
2019-11-08T04:08:51.000Z
|
2020-03-02T14:01:38.000Z
|
public/course/NLP/Hockey/sample.py
|
Caterpie-poke/playground
|
da78cf88154587c555d69a131a418fb999c14409
|
[
"MIT"
] | 3
|
2019-10-28T02:49:41.000Z
|
2019-12-01T09:01:42.000Z
|
from puppy2d import *

# Scene setup for a simple two-disc physics demo (course sample, "Hockey").
# NOTE(review): the puppy2d API is not visible here -- argument meanings are
# inferred from the call shapes (likely Rectangle(x, y, width, height) and
# Circle(x, y, radius)); confirm against the library before relying on them.
Rectangle(0, 500, 800, 50, isStatic=True)    # static boundary
Rectangle(0, -500, 800, 50, isStatic=True)   # static boundary (opposite side)
Rectangle(400, 0, 50, 1000, isStatic=True)   # static boundary
Rectangle(-400, 0, 50, 1000, isStatic=True)  # static boundary
Circle(0, 300, 50)   # disc
Circle(0, -300, 50)  # disc
Circle(0, 0, 25, restitution=0.9)  # smaller, bouncy disc (high restitution)
| 24.454545
| 43
| 0.69145
| 46
| 269
| 4.043478
| 0.369565
| 0.258065
| 0.33871
| 0.172043
| 0.822581
| 0.822581
| 0.655914
| 0.655914
| 0.397849
| 0
| 0
| 0.244635
| 0.133829
| 269
| 10
| 44
| 26.9
| 0.553648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b2c94bd0141a5d0ed28cb6031353270e7a9cb5a
| 24,197
|
py
|
Python
|
sense/client/profile_api.py
|
sdn-sense/sense-o-py-client
|
d686c4a2e084fbb6d8ff3b00c6f73db63965f9c6
|
[
"MIT"
] | null | null | null |
sense/client/profile_api.py
|
sdn-sense/sense-o-py-client
|
d686c4a2e084fbb6d8ff3b00c6f73db63965f9c6
|
[
"MIT"
] | 22
|
2020-08-27T21:57:47.000Z
|
2022-03-15T14:57:28.000Z
|
sense/client/profile_api.py
|
sdn-sense/sense-o-py-client
|
d686c4a2e084fbb6d8ff3b00c6f73db63965f9c6
|
[
"MIT"
] | 1
|
2021-03-30T06:30:20.000Z
|
2021-03-30T06:30:20.000Z
|
# coding: utf-8
"""
SENSE-O Northbound Intent API
StackV SENSE-O Northbound REST API Documentation # noqa: E501
OpenAPI spec version: 2.0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import six
from sense.client.requestwrapper import RequestWrapper
class ProfileApi(object):
    """Client for the SENSE-O Northbound profile endpoints.

    Originally generated by swagger-codegen, then hand-tidied: the
    repeated keyword/required-parameter validation lives in private
    helpers, request paths use plain strings or real f-strings, and the
    sync/async wrapper branches (which called the identical helper in
    both arms) are collapsed.

    Every public method delegates to a matching ``*_with_http_info``
    helper that validates arguments and issues the HTTP request through
    the shared client. NOTE(review): despite the ``async_req`` keyword
    inherited from the generator, both code paths are synchronous here —
    the return value is always whatever ``self.client.request`` returns.
    """

    # Keyword arguments accepted by every ``*_with_http_info`` helper.
    _COMMON_KWARGS = ('async_req', '_return_http_data_only',
                      '_preload_content', '_request_timeout')

    def __init__(self, req_wrapper=None):
        """Create the API client.

        :param req_wrapper: optional preconfigured RequestWrapper; a new
            one is created when omitted.
        """
        if req_wrapper is None:
            self.client = RequestWrapper()
        else:
            self.client = req_wrapper
        # Cache the service-instance UUID if the wrapper's config has one.
        if 'SI_UUID' in self.client.config:
            self.si_uuid = self.client.config['SI_UUID']
        else:
            self.si_uuid = None

    def _validate_kwargs(self, kwargs, allowed_extra, method_name):
        """Raise TypeError for any keyword *method_name* does not accept."""
        allowed = set(self._COMMON_KWARGS)
        allowed.update(allowed_extra)
        for key in kwargs:
            if key not in allowed:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method %s" % (key, method_name))

    @staticmethod
    def _require(value, name, method_name):
        """Raise ValueError when a required parameter is None."""
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `%s`"
                % (name, method_name))

    def profile_list(self, **kwargs):
        """Get skimmed profile data.

        Retrieves the list of profiles the user is permitted to use
        without any JSON data.

        :param async_req bool
        :return: list[SlimProfile]
        """
        kwargs['_return_http_data_only'] = True
        # Sync and async paths are identical — delegate unconditionally.
        return self.profile_get_with_http_info(**kwargs)

    def profile_get_with_http_info(self, **kwargs):
        """Get skimmed profile data (request-level helper).

        :param async_req bool
        :return: list[SlimProfile]
        """
        self._validate_kwargs(kwargs, (), 'profile_get')
        return self.client.request('GET', '/profile')

    def profile_describe(self, uuid, **kwargs):  # noqa: E501
        """Get a single profile.

        Retrieves the specified profile.

        :param str uuid: Profile UUID. (required)
        :param async_req bool
        :return: FullProfile
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_get_with_http_info(uuid, **kwargs)

    def profile_uuid_get_with_http_info(self, uuid, **kwargs):  # noqa: E501
        """Get a single profile (request-level helper).

        :param str uuid: Profile UUID. (required)
        :return: FullProfile
        :raises ValueError: if ``uuid`` is None.
        """
        self._validate_kwargs(kwargs, ('uuid',), 'profile_uuid_get')
        self._require(uuid, 'uuid', 'profile_uuid_get')
        # Was f'/profile/' + uuid — a no-placeholder f-string concatenated
        # with the argument; a single f-string is the intended form.
        return self.client.request('GET', f'/profile/{uuid}')

    def profile_create(self, body, **kwargs):  # noqa: E501
        """Create a profile.

        Builds and saves a new profile, using provided starting data.

        :param ProfileManifest body: Profile creation manifest. (required)
        :param async_req bool
        :return: str
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_post_with_http_info(body, **kwargs)

    def profile_post_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create a profile (request-level helper).

        :param ProfileManifest body: Profile creation manifest. (required)
        :return: str
        :raises ValueError: if ``body`` is None.
        """
        self._validate_kwargs(kwargs, ('body',), 'profile_post')
        self._require(body, 'body', 'profile_post')
        return self.client.request('POST', '/profile', body_params=body)

    def profile_delete(self, uuid, **kwargs):  # noqa: E501
        """Delete profile.

        Deletes the specified profile.

        :param str uuid: Profile UUID. (required)
        :param async_req bool
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_delete_with_http_info(uuid, **kwargs)

    def profile_uuid_delete_with_http_info(self, uuid, **kwargs):  # noqa: E501
        """Delete profile (request-level helper).

        :param str uuid: Profile UUID. (required)
        :raises ValueError: if ``uuid`` is None.
        """
        self._validate_kwargs(kwargs, ('uuid',), 'profile_uuid_delete')
        self._require(uuid, 'uuid', 'profile_uuid_delete')
        return self.client.request('DELETE', f'/profile/{uuid}')

    def profile_add_licenses(self, body, uuid, **kwargs):  # noqa: E501
        """Add new license.

        Assigns a new license to a user, giving them access to execute
        the specified profile (and potentially edit as well).

        :param ProfileLicense body: License object. (required)
        :param str uuid: Profile UUID. (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_licenses_post_with_http_info(
            body, uuid, **kwargs)

    def profile_uuid_licenses_post_with_http_info(self, body, uuid,
                                                  **kwargs):  # noqa: E501
        """Add new license (request-level helper).

        :param ProfileLicense body: License object. (required)
        :param str uuid: Profile UUID. (required)
        :raises ValueError: if ``body`` or ``uuid`` is None.
        """
        self._validate_kwargs(kwargs, ('body', 'uuid'),
                              'profile_uuid_licenses_post')
        self._require(body, 'body', 'profile_uuid_licenses_post')
        self._require(uuid, 'uuid', 'profile_uuid_licenses_post')
        return self.client.request('POST', f'/profile/{uuid}/licenses',
                                   body_params=body)

    def profile_update_licenses(self, body, uuid, **kwargs):  # noqa: E501
        """Edit existing license.

        Edits an existing license to a user. Setting the remaining field
        to 0 will delete the license.

        :param ProfileLicense body: License object. (required)
        :param str uuid: Profile UUID. (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_licenses_put_with_http_info(
            body, uuid, **kwargs)

    def profile_uuid_licenses_put_with_http_info(self, body, uuid,
                                                 **kwargs):  # noqa: E501
        """Edit existing license (request-level helper).

        :param ProfileLicense body: License object. (required)
        :param str uuid: Profile UUID. (required)
        :raises ValueError: if ``body`` or ``uuid`` is None.
        """
        self._validate_kwargs(kwargs, ('body', 'uuid'),
                              'profile_uuid_licenses_put')
        self._require(body, 'body', 'profile_uuid_licenses_put')
        self._require(uuid, 'uuid', 'profile_uuid_licenses_put')
        return self.client.request('PUT', f'/profile/{uuid}/licenses',
                                   body_params=body)

    def profile_update(self, body, uuid, **kwargs):  # noqa: E501
        """Edit a profile.

        Submits an updated version of a profile for saving.

        :param ProfileManifest body: Profile creation manifest. (required)
        :param str uuid: Profile UUID. (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_put_with_http_info(body, uuid, **kwargs)

    def profile_uuid_put_with_http_info(self, body, uuid,
                                        **kwargs):  # noqa: E501
        """Edit a profile (request-level helper).

        :param ProfileManifest body: Profile creation manifest. (required)
        :param str uuid: Profile UUID. (required)
        :raises ValueError: if ``body`` or ``uuid`` is None.
        """
        self._validate_kwargs(kwargs, ('body', 'uuid'), 'profile_uuid_put')
        self._require(body, 'body', 'profile_uuid_put')
        self._require(uuid, 'uuid', 'profile_uuid_put')
        return self.client.request('PUT', f'/profile/{uuid}',
                                   body_params=body)

    def profile_get_uses(self, uuid, username, **kwargs):  # noqa: E501
        """Get license usage.

        Retrieves the remaining number of tickets or slots for
        allocations.

        :param str uuid: Profile UUID. (required)
        :param str username: Username of licensed user. (required)
        :return: float
        """
        kwargs['_return_http_data_only'] = True
        return self.profile_uuid_uses_username_get_with_http_info(
            uuid, username, **kwargs)

    def profile_uuid_uses_username_get_with_http_info(self, uuid, username,
                                                      **kwargs):  # noqa: E501
        """Get license usage (request-level helper).

        :param str uuid: Profile UUID. (required)
        :param str username: Username of licensed user. (required)
        :raises ValueError: if ``uuid`` or ``username`` is None.
        """
        self._validate_kwargs(kwargs, ('uuid', 'username'),
                              'profile_uuid_uses_username_get')
        self._require(uuid, 'uuid', 'profile_uuid_uses_username_get')
        self._require(username, 'username',
                      'profile_uuid_uses_username_get')
        return self.client.request('GET', f'/profile/{uuid}/uses/{username}')
| 41.863322
| 138
| 0.589867
| 2,826
| 24,197
| 4.879689
| 0.065464
| 0.049891
| 0.027846
| 0.026106
| 0.949746
| 0.943002
| 0.931545
| 0.917404
| 0.904931
| 0.894416
| 0
| 0.016156
| 0.324668
| 24,197
| 577
| 139
| 41.935875
| 0.827734
| 0.395793
| 0
| 0.690647
| 1
| 0
| 0.204213
| 0.05436
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061151
| false
| 0
| 0.010791
| 0
| 0.161871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b8b4b00286874edd0587b50f4598e236128cfa5
| 28,074
|
py
|
Python
|
sdk/python/pulumi_consul/autopilot_config.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2019-11-12T12:21:18.000Z
|
2021-07-31T08:17:22.000Z
|
sdk/python/pulumi_consul/autopilot_config.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 38
|
2019-11-21T15:19:33.000Z
|
2022-03-31T15:24:11.000Z
|
sdk/python/pulumi_consul/autopilot_config.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-11-24T12:23:13.000Z
|
2021-12-06T17:33:31.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AutopilotConfigArgs', 'AutopilotConfig']
@pulumi.input_type
class AutopilotConfigArgs:
    def __init__(__self__, *,
                 cleanup_dead_servers: Optional[pulumi.Input[bool]] = None,
                 datacenter: Optional[pulumi.Input[str]] = None,
                 disable_upgrade_migration: Optional[pulumi.Input[bool]] = None,
                 last_contact_threshold: Optional[pulumi.Input[str]] = None,
                 max_trailing_logs: Optional[pulumi.Input[int]] = None,
                 redundancy_zone_tag: Optional[pulumi.Input[str]] = None,
                 server_stabilization_time: Optional[pulumi.Input[str]] = None,
                 upgrade_version_tag: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AutopilotConfig resource.
        :param pulumi.Input[bool] cleanup_dead_servers: Whether failing servers are removed when a
               replacement comes online. Defaults to true.
        :param pulumi.Input[str] datacenter: The datacenter to use, overriding the agent's default
               datacenter and the datacenter configured in the provider setup.
        :param pulumi.Input[bool] disable_upgrade_migration: Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
               Defaults to false.
        :param pulumi.Input[str] last_contact_threshold: The time after which a server is considered
               unhealthy and will be removed. Defaults to `"200ms"`.
        :param pulumi.Input[int] max_trailing_logs: The maximum number of Raft log entries a server
               can trail the leader by. Defaults to 250.
        :param pulumi.Input[str] redundancy_zone_tag: The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
               tag to use. Consul tries to keep one voting server per zone to take advantage of
               isolated failure domains. Defaults to an empty string.
        :param pulumi.Input[str] server_stabilization_time: How long a server must be healthy and
               stable before being promoted to a full, voting member. Defaults to `"10s"`.
        :param pulumi.Input[str] upgrade_version_tag: The tag overriding the version information
               used during a migration. Defaults to an empty string.
        """
        # Only forward values the caller actually supplied; unset (None)
        # arguments are left out of the pulumi state entirely.
        supplied = (
            ("cleanup_dead_servers", cleanup_dead_servers),
            ("datacenter", datacenter),
            ("disable_upgrade_migration", disable_upgrade_migration),
            ("last_contact_threshold", last_contact_threshold),
            ("max_trailing_logs", max_trailing_logs),
            ("redundancy_zone_tag", redundancy_zone_tag),
            ("server_stabilization_time", server_stabilization_time),
            ("upgrade_version_tag", upgrade_version_tag),
        )
        for arg_name, arg_value in supplied:
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="cleanupDeadServers")
    def cleanup_dead_servers(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether failing servers are removed when a replacement comes
        online. Defaults to true.
        """
        return pulumi.get(self, "cleanup_dead_servers")

    @cleanup_dead_servers.setter
    def cleanup_dead_servers(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cleanup_dead_servers", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use, overriding the agent's default datacenter
        and the datacenter configured in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter(name="disableUpgradeMigration")
    def disable_upgrade_migration(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
        Defaults to false.
        """
        return pulumi.get(self, "disable_upgrade_migration")

    @disable_upgrade_migration.setter
    def disable_upgrade_migration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_upgrade_migration", value)

    @property
    @pulumi.getter(name="lastContactThreshold")
    def last_contact_threshold(self) -> Optional[pulumi.Input[str]]:
        """
        The time after which a server is considered unhealthy and will
        be removed. Defaults to `"200ms"`.
        """
        return pulumi.get(self, "last_contact_threshold")

    @last_contact_threshold.setter
    def last_contact_threshold(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_contact_threshold", value)

    @property
    @pulumi.getter(name="maxTrailingLogs")
    def max_trailing_logs(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of Raft log entries a server can trail the
        leader by. Defaults to 250.
        """
        return pulumi.get(self, "max_trailing_logs")

    @max_trailing_logs.setter
    def max_trailing_logs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_trailing_logs", value)

    @property
    @pulumi.getter(name="redundancyZoneTag")
    def redundancy_zone_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
        tag to use. Consul tries to keep one voting server per zone to
        take advantage of isolated failure domains. Defaults to an empty
        string.
        """
        return pulumi.get(self, "redundancy_zone_tag")

    @redundancy_zone_tag.setter
    def redundancy_zone_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "redundancy_zone_tag", value)

    @property
    @pulumi.getter(name="serverStabilizationTime")
    def server_stabilization_time(self) -> Optional[pulumi.Input[str]]:
        """
        How long a server must be healthy and stable before being
        promoted to a full, voting member. Defaults to `"10s"`.
        """
        return pulumi.get(self, "server_stabilization_time")

    @server_stabilization_time.setter
    def server_stabilization_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_stabilization_time", value)

    @property
    @pulumi.getter(name="upgradeVersionTag")
    def upgrade_version_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The tag overriding the version information used during a
        migration. Defaults to an empty string.
        """
        return pulumi.get(self, "upgrade_version_tag")

    @upgrade_version_tag.setter
    def upgrade_version_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "upgrade_version_tag", value)
@pulumi.input_type
class _AutopilotConfigState:
    def __init__(__self__, *,
                 cleanup_dead_servers: Optional[pulumi.Input[bool]] = None,
                 datacenter: Optional[pulumi.Input[str]] = None,
                 disable_upgrade_migration: Optional[pulumi.Input[bool]] = None,
                 last_contact_threshold: Optional[pulumi.Input[str]] = None,
                 max_trailing_logs: Optional[pulumi.Input[int]] = None,
                 redundancy_zone_tag: Optional[pulumi.Input[str]] = None,
                 server_stabilization_time: Optional[pulumi.Input[str]] = None,
                 upgrade_version_tag: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering AutopilotConfig resources.
        :param pulumi.Input[bool] cleanup_dead_servers: Whether failing servers are removed when a
               replacement comes online. Defaults to true.
        :param pulumi.Input[str] datacenter: The datacenter to use, overriding the agent's default
               datacenter and the datacenter configured in the provider setup.
        :param pulumi.Input[bool] disable_upgrade_migration: Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
               Defaults to false.
        :param pulumi.Input[str] last_contact_threshold: The time after which a server is considered
               unhealthy and will be removed. Defaults to `"200ms"`.
        :param pulumi.Input[int] max_trailing_logs: The maximum number of Raft log entries a server
               can trail the leader by. Defaults to 250.
        :param pulumi.Input[str] redundancy_zone_tag: The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
               tag to use. Consul tries to keep one voting server per zone to take advantage of
               isolated failure domains. Defaults to an empty string.
        :param pulumi.Input[str] server_stabilization_time: How long a server must be healthy and
               stable before being promoted to a full, voting member. Defaults to `"10s"`.
        :param pulumi.Input[str] upgrade_version_tag: The tag overriding the version information
               used during a migration. Defaults to an empty string.
        """
        # Record only the state fields that were explicitly provided;
        # None means "absent" and is never written to pulumi state.
        state_fields = (
            ("cleanup_dead_servers", cleanup_dead_servers),
            ("datacenter", datacenter),
            ("disable_upgrade_migration", disable_upgrade_migration),
            ("last_contact_threshold", last_contact_threshold),
            ("max_trailing_logs", max_trailing_logs),
            ("redundancy_zone_tag", redundancy_zone_tag),
            ("server_stabilization_time", server_stabilization_time),
            ("upgrade_version_tag", upgrade_version_tag),
        )
        for field_name, field_value in state_fields:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="cleanupDeadServers")
    def cleanup_dead_servers(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether failing servers are removed when a replacement comes
        online. Defaults to true.
        """
        return pulumi.get(self, "cleanup_dead_servers")

    @cleanup_dead_servers.setter
    def cleanup_dead_servers(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cleanup_dead_servers", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use, overriding the agent's default datacenter
        and the datacenter configured in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter(name="disableUpgradeMigration")
    def disable_upgrade_migration(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
        Defaults to false.
        """
        return pulumi.get(self, "disable_upgrade_migration")

    @disable_upgrade_migration.setter
    def disable_upgrade_migration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_upgrade_migration", value)

    @property
    @pulumi.getter(name="lastContactThreshold")
    def last_contact_threshold(self) -> Optional[pulumi.Input[str]]:
        """
        The time after which a server is considered unhealthy and will
        be removed. Defaults to `"200ms"`.
        """
        return pulumi.get(self, "last_contact_threshold")

    @last_contact_threshold.setter
    def last_contact_threshold(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_contact_threshold", value)

    @property
    @pulumi.getter(name="maxTrailingLogs")
    def max_trailing_logs(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of Raft log entries a server can trail the
        leader by. Defaults to 250.
        """
        return pulumi.get(self, "max_trailing_logs")

    @max_trailing_logs.setter
    def max_trailing_logs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_trailing_logs", value)

    @property
    @pulumi.getter(name="redundancyZoneTag")
    def redundancy_zone_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
        tag to use. Consul tries to keep one voting server per zone to
        take advantage of isolated failure domains. Defaults to an empty
        string.
        """
        return pulumi.get(self, "redundancy_zone_tag")

    @redundancy_zone_tag.setter
    def redundancy_zone_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "redundancy_zone_tag", value)

    @property
    @pulumi.getter(name="serverStabilizationTime")
    def server_stabilization_time(self) -> Optional[pulumi.Input[str]]:
        """
        How long a server must be healthy and stable before being
        promoted to a full, voting member. Defaults to `"10s"`.
        """
        return pulumi.get(self, "server_stabilization_time")

    @server_stabilization_time.setter
    def server_stabilization_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_stabilization_time", value)

    @property
    @pulumi.getter(name="upgradeVersionTag")
    def upgrade_version_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The tag overriding the version information used during a
        migration. Defaults to an empty string.
        """
        return pulumi.get(self, "upgrade_version_tag")

    @upgrade_version_tag.setter
    def upgrade_version_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "upgrade_version_tag", value)
class AutopilotConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cleanup_dead_servers: Optional[pulumi.Input[bool]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
disable_upgrade_migration: Optional[pulumi.Input[bool]] = None,
last_contact_threshold: Optional[pulumi.Input[str]] = None,
max_trailing_logs: Optional[pulumi.Input[int]] = None,
redundancy_zone_tag: Optional[pulumi.Input[str]] = None,
server_stabilization_time: Optional[pulumi.Input[str]] = None,
upgrade_version_tag: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides access to the [Autopilot Configuration](https://www.consul.io/docs/guides/autopilot.html)
of Consul to automatically manage Consul servers.
It includes to automatically cleanup dead servers, monitor the status of the Raft
cluster and stable server introduction.
## Example Usage
```python
import pulumi
import pulumi_consul as consul
config = consul.AutopilotConfig("config",
cleanup_dead_servers=False,
last_contact_threshold="1s",
max_trailing_logs=500)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] cleanup_dead_servers: Whether to remove failing servers when a
replacement comes online. Defaults to true.
:param pulumi.Input[str] datacenter: The datacenter to use. This overrides the agent's
default datacenter and the datacenter in the provider setup.
:param pulumi.Input[bool] disable_upgrade_migration: Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
Defaults to false.
:param pulumi.Input[str] last_contact_threshold: The time after which a server is
considered as unhealthy and will be removed. Defaults to `"200ms"`.
:param pulumi.Input[int] max_trailing_logs: The maximum number of Raft log entries a
server can trail the leader. Defaults to 250.
:param pulumi.Input[str] redundancy_zone_tag: The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
tag to use. Consul will try to keep one voting server by zone to take advantage
of isolated failure domains. Defaults to an empty string.
:param pulumi.Input[str] server_stabilization_time: The period to wait for a server to be
healthy and stable before being promoted to a full, voting member. Defaults to
`"10s"`.
:param pulumi.Input[str] upgrade_version_tag: The tag to override the version information
used during a migration. Defaults to an empty string.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[AutopilotConfigArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides access to the [Autopilot Configuration](https://www.consul.io/docs/guides/autopilot.html)
of Consul to automatically manage Consul servers.
It includes to automatically cleanup dead servers, monitor the status of the Raft
cluster and stable server introduction.
## Example Usage
```python
import pulumi
import pulumi_consul as consul
config = consul.AutopilotConfig("config",
cleanup_dead_servers=False,
last_contact_threshold="1s",
max_trailing_logs=500)
```
:param str resource_name: The name of the resource.
:param AutopilotConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AutopilotConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cleanup_dead_servers: Optional[pulumi.Input[bool]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
disable_upgrade_migration: Optional[pulumi.Input[bool]] = None,
last_contact_threshold: Optional[pulumi.Input[str]] = None,
max_trailing_logs: Optional[pulumi.Input[int]] = None,
redundancy_zone_tag: Optional[pulumi.Input[str]] = None,
server_stabilization_time: Optional[pulumi.Input[str]] = None,
upgrade_version_tag: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AutopilotConfigArgs.__new__(AutopilotConfigArgs)
__props__.__dict__["cleanup_dead_servers"] = cleanup_dead_servers
__props__.__dict__["datacenter"] = datacenter
__props__.__dict__["disable_upgrade_migration"] = disable_upgrade_migration
__props__.__dict__["last_contact_threshold"] = last_contact_threshold
__props__.__dict__["max_trailing_logs"] = max_trailing_logs
__props__.__dict__["redundancy_zone_tag"] = redundancy_zone_tag
__props__.__dict__["server_stabilization_time"] = server_stabilization_time
__props__.__dict__["upgrade_version_tag"] = upgrade_version_tag
super(AutopilotConfig, __self__).__init__(
'consul:index/autopilotConfig:AutopilotConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cleanup_dead_servers: Optional[pulumi.Input[bool]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
disable_upgrade_migration: Optional[pulumi.Input[bool]] = None,
last_contact_threshold: Optional[pulumi.Input[str]] = None,
max_trailing_logs: Optional[pulumi.Input[int]] = None,
redundancy_zone_tag: Optional[pulumi.Input[str]] = None,
server_stabilization_time: Optional[pulumi.Input[str]] = None,
upgrade_version_tag: Optional[pulumi.Input[str]] = None) -> 'AutopilotConfig':
"""
Get an existing AutopilotConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] cleanup_dead_servers: Whether to remove failing servers when a
replacement comes online. Defaults to true.
:param pulumi.Input[str] datacenter: The datacenter to use. This overrides the agent's
default datacenter and the datacenter in the provider setup.
:param pulumi.Input[bool] disable_upgrade_migration: Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
Defaults to false.
:param pulumi.Input[str] last_contact_threshold: The time after which a server is
considered as unhealthy and will be removed. Defaults to `"200ms"`.
:param pulumi.Input[int] max_trailing_logs: The maximum number of Raft log entries a
server can trail the leader. Defaults to 250.
:param pulumi.Input[str] redundancy_zone_tag: The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
tag to use. Consul will try to keep one voting server by zone to take advantage
of isolated failure domains. Defaults to an empty string.
:param pulumi.Input[str] server_stabilization_time: The period to wait for a server to be
healthy and stable before being promoted to a full, voting member. Defaults to
`"10s"`.
:param pulumi.Input[str] upgrade_version_tag: The tag to override the version information
used during a migration. Defaults to an empty string.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AutopilotConfigState.__new__(_AutopilotConfigState)
__props__.__dict__["cleanup_dead_servers"] = cleanup_dead_servers
__props__.__dict__["datacenter"] = datacenter
__props__.__dict__["disable_upgrade_migration"] = disable_upgrade_migration
__props__.__dict__["last_contact_threshold"] = last_contact_threshold
__props__.__dict__["max_trailing_logs"] = max_trailing_logs
__props__.__dict__["redundancy_zone_tag"] = redundancy_zone_tag
__props__.__dict__["server_stabilization_time"] = server_stabilization_time
__props__.__dict__["upgrade_version_tag"] = upgrade_version_tag
return AutopilotConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cleanupDeadServers")
def cleanup_dead_servers(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to remove failing servers when a
replacement comes online. Defaults to true.
"""
return pulumi.get(self, "cleanup_dead_servers")
@property
@pulumi.getter
def datacenter(self) -> pulumi.Output[Optional[str]]:
"""
The datacenter to use. This overrides the agent's
default datacenter and the datacenter in the provider setup.
"""
return pulumi.get(self, "datacenter")
@property
@pulumi.getter(name="disableUpgradeMigration")
def disable_upgrade_migration(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to disable [upgrade migrations](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones).
Defaults to false.
"""
return pulumi.get(self, "disable_upgrade_migration")
@property
@pulumi.getter(name="lastContactThreshold")
def last_contact_threshold(self) -> pulumi.Output[Optional[str]]:
"""
The time after which a server is
considered as unhealthy and will be removed. Defaults to `"200ms"`.
"""
return pulumi.get(self, "last_contact_threshold")
@property
@pulumi.getter(name="maxTrailingLogs")
def max_trailing_logs(self) -> pulumi.Output[Optional[int]]:
"""
The maximum number of Raft log entries a
server can trail the leader. Defaults to 250.
"""
return pulumi.get(self, "max_trailing_logs")
@property
@pulumi.getter(name="redundancyZoneTag")
def redundancy_zone_tag(self) -> pulumi.Output[Optional[str]]:
"""
The [redundancy zone](https://www.consul.io/docs/guides/autopilot.html#redundancy-zones)
tag to use. Consul will try to keep one voting server by zone to take advantage
of isolated failure domains. Defaults to an empty string.
"""
return pulumi.get(self, "redundancy_zone_tag")
@property
@pulumi.getter(name="serverStabilizationTime")
def server_stabilization_time(self) -> pulumi.Output[Optional[str]]:
"""
The period to wait for a server to be
healthy and stable before being promoted to a full, voting member. Defaults to
`"10s"`.
"""
return pulumi.get(self, "server_stabilization_time")
@property
@pulumi.getter(name="upgradeVersionTag")
def upgrade_version_tag(self) -> pulumi.Output[Optional[str]]:
"""
The tag to override the version information
used during a migration. Defaults to an empty string.
"""
return pulumi.get(self, "upgrade_version_tag")
| 48.487047
| 168
| 0.672971
| 3,339
| 28,074
| 5.423181
| 0.068583
| 0.065606
| 0.075547
| 0.054672
| 0.907334
| 0.901149
| 0.895792
| 0.887011
| 0.884195
| 0.884195
| 0
| 0.00304
| 0.238334
| 28,074
| 578
| 169
| 48.570934
| 0.843801
| 0.37305
| 0
| 0.83391
| 1
| 0
| 0.130095
| 0.052366
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16263
| false
| 0.00346
| 0.017301
| 0
| 0.276817
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1bc4f58877774c4d2a942683d70d34207bdc21f2
| 148
|
py
|
Python
|
src/tensora/__init__.py
|
amirmolavi/tensora
|
521a18abbb9689852b533b0d75b92ccd6bcd5245
|
[
"MIT"
] | 3
|
2019-04-24T01:47:20.000Z
|
2021-06-13T10:40:38.000Z
|
src/tensora/__init__.py
|
amirmolavi/tensora
|
521a18abbb9689852b533b0d75b92ccd6bcd5245
|
[
"MIT"
] | 5
|
2021-01-22T18:02:41.000Z
|
2021-02-20T21:11:04.000Z
|
src/tensora/__init__.py
|
amirmolavi/tensora
|
521a18abbb9689852b533b0d75b92ccd6bcd5245
|
[
"MIT"
] | 2
|
2021-01-24T23:05:43.000Z
|
2021-04-19T18:54:55.000Z
|
from .format import Mode, Format # noqa: F401
from .tensor import Tensor # noqa: F401
from .function import tensor_method, evaluate # noqa: F401
| 37
| 59
| 0.75
| 21
| 148
| 5.238095
| 0.47619
| 0.218182
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07377
| 0.175676
| 148
| 3
| 60
| 49.333333
| 0.827869
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1bc5d095dcb686c9c6e8b6eed1065a40a157c58c
| 131
|
py
|
Python
|
tenant_schemas/tests/__init__.py
|
Jragon/django-tenants-rls
|
99a336a0d1ee83c70b6224a583ee8e3b8ee5c930
|
[
"MIT"
] | null | null | null |
tenant_schemas/tests/__init__.py
|
Jragon/django-tenants-rls
|
99a336a0d1ee83c70b6224a583ee8e3b8ee5c930
|
[
"MIT"
] | null | null | null |
tenant_schemas/tests/__init__.py
|
Jragon/django-tenants-rls
|
99a336a0d1ee83c70b6224a583ee8e3b8ee5c930
|
[
"MIT"
] | null | null | null |
from .test_cache import *
from .test_log import *
from .test_routes import *
from .test_tenants import *
from .test_utils import *
| 21.833333
| 27
| 0.770992
| 20
| 131
| 4.8
| 0.4
| 0.416667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 5
| 28
| 26.2
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
940c95bf7ba7e46f7a1d4f4a7a6adc1630333a73
| 7,218
|
py
|
Python
|
tests/unit/db/postgres/test_unloader.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/db/postgres/test_unloader.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/db/postgres/test_unloader.py
|
ellyteitsworth/records-mover
|
21cd56efc2d23cfff04ec1fdf582e5229546c418
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from records_mover.db.postgres.unloader import PostgresUnloader
from records_mover.records import DelimitedRecordsFormat
from mock import MagicMock, Mock, patch, ANY
class TestPostgresUnloader(unittest.TestCase):
def setUp(self):
self.mock_url_resolver = Mock(name='url_resolver')
self.mock_db = MagicMock(name='db')
self.unloader = PostgresUnloader(self.mock_db)
@patch('records_mover.db.postgres.unloader.quote_value')
@patch('records_mover.db.postgres.unloader.copy_to')
@patch('records_mover.db.postgres.unloader.complain_on_unhandled_hints')
@patch('records_mover.db.postgres.unloader.Table')
@patch('records_mover.db.postgres.unloader.postgres_copy_to_options')
def test_unload(self,
mock_postgres_copy_to_options,
mock_Table,
mock_complain_on_unhandled_hints,
mock_copy_to,
mock_quote_value):
mock_schema = Mock(name='schema')
mock_table = Mock(name='table')
mock_unload_plan = Mock(name='unload_plan')
mock_directory = MagicMock(name='directory')
mock_records_format = Mock(name='records_format',
spec=DelimitedRecordsFormat)
mock_records_format.hints = {}
mock_unload_plan.records_format = mock_records_format
mock_date_output_style = "DATE_OUTPUT_STYLE"
mock_date_order_style = "DATE_ORDER_STYLE"
mock_postgres_options = {
'abc': 123
}
mock_postgres_copy_to_options.return_value = (
mock_date_output_style,
mock_date_order_style,
mock_postgres_options,
)
mock_quote_value.return_value = "ABC"
self.unloader.unload(mock_schema,
mock_table,
mock_unload_plan,
mock_directory)
mock_processing_instructions = mock_unload_plan.processing_instructions
mock_unhandled_hints = set(mock_records_format.hints.keys())
mock_complain_on_unhandled_hints.\
assert_called_with(mock_processing_instructions.fail_if_dont_understand,
mock_unhandled_hints,
mock_records_format.hints)
mock_table_obj = mock_Table.return_value
mock_Table.assert_called_with(mock_table,
ANY,
schema=mock_schema,
autoload=True,
autoload_with=self.mock_db)
mock_conn = self.mock_db.engine.begin.return_value.__enter__.return_value
mock_quote_value.assert_called_with(mock_conn, 'DATE_OUTPUT_STYLE, DATE_ORDER_STYLE')
mock_conn.execute.assert_called_with('SET LOCAL DateStyle = ABC')
mock_fileobj = mock_directory.loc.file_in_this_directory.return_value.open.\
return_value.__enter__.return_value
mock_copy_to.assert_called_with(mock_table_obj.select.return_value,
mock_fileobj,
mock_conn,
abc=123)
@patch('records_mover.db.postgres.unloader.quote_value')
@patch('records_mover.db.postgres.unloader.copy_to')
@patch('records_mover.db.postgres.unloader.complain_on_unhandled_hints')
@patch('records_mover.db.postgres.unloader.Table')
@patch('records_mover.db.postgres.unloader.postgres_copy_to_options')
def test_unload_default_date_order_style(self,
mock_postgres_copy_to_options,
mock_Table,
mock_complain_on_unhandled_hints,
mock_copy_to,
mock_quote_value):
mock_schema = Mock(name='schema')
mock_table = Mock(name='table')
mock_unload_plan = Mock(name='unload_plan')
mock_directory = MagicMock(name='directory')
mock_records_format = Mock(name='records_format',
spec=DelimitedRecordsFormat)
mock_records_format.hints = {}
mock_unload_plan.records_format = mock_records_format
mock_date_output_style = "DATE_OUTPUT_STYLE"
mock_date_order_style = None
mock_postgres_options = {
'abc': 123
}
mock_postgres_copy_to_options.return_value = (
mock_date_output_style,
mock_date_order_style,
mock_postgres_options,
)
mock_quote_value.return_value = "ABC"
self.unloader.unload(mock_schema,
mock_table,
mock_unload_plan,
mock_directory)
mock_processing_instructions = mock_unload_plan.processing_instructions
mock_unhandled_hints = set(mock_records_format.hints.keys())
mock_complain_on_unhandled_hints.\
assert_called_with(mock_processing_instructions.fail_if_dont_understand,
mock_unhandled_hints,
mock_records_format.hints)
mock_table_obj = mock_Table.return_value
mock_Table.assert_called_with(mock_table,
ANY,
schema=mock_schema,
autoload=True,
autoload_with=self.mock_db)
mock_conn = self.mock_db.engine.begin.return_value.__enter__.return_value
mock_quote_value.assert_called_with(mock_conn, 'DATE_OUTPUT_STYLE, MDY')
mock_conn.execute.assert_called_with('SET LOCAL DateStyle = ABC')
mock_fileobj = mock_directory.loc.file_in_this_directory.return_value.open.\
return_value.__enter__.return_value
mock_copy_to.assert_called_with(mock_table_obj.select.return_value,
mock_fileobj,
mock_conn,
abc=123)
@patch('records_mover.db.postgres.unloader.complain_on_unhandled_hints')
@patch('records_mover.db.postgres.unloader.postgres_copy_to_options')
def test_can_unload_this_format_true(self,
mock_postgres_copy_to_options,
mock_complain_on_unhandled_hints):
source_records_format = Mock(name='source_records_format',
spec=DelimitedRecordsFormat)
source_records_format.hints = {}
out = self.unloader.can_unload_this_format(source_records_format)
self.assertTrue(out)
def test_best_records_format(self):
self.assertEqual(DelimitedRecordsFormat(variant='bluelabs',
hints={
'compression': None
}),
self.unloader.best_records_format())
| 50.125
| 93
| 0.592823
| 730
| 7,218
| 5.384932
| 0.119178
| 0.066141
| 0.046299
| 0.072755
| 0.840753
| 0.823455
| 0.819893
| 0.811498
| 0.811498
| 0.811498
| 0
| 0.002523
| 0.341092
| 7,218
| 143
| 94
| 50.475524
| 0.824012
| 0
| 0
| 0.77037
| 0
| 0
| 0.129122
| 0.088667
| 0
| 0
| 0
| 0
| 0.088889
| 1
| 0.037037
| false
| 0
| 0.02963
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
945daf87d28a2ed629a052cfc6bed1b2c979d89a
| 109
|
py
|
Python
|
technocup/2017/elimination_round_3/place.py
|
dluschan/olymp
|
dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7
|
[
"MIT"
] | null | null | null |
technocup/2017/elimination_round_3/place.py
|
dluschan/olymp
|
dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7
|
[
"MIT"
] | null | null | null |
technocup/2017/elimination_round_3/place.py
|
dluschan/olymp
|
dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7
|
[
"MIT"
] | 1
|
2018-09-14T18:50:48.000Z
|
2018-09-14T18:50:48.000Z
|
n, m, k = map(int, input().split())
print((k + 2*m - 1) // (2*m), (k % 2*m + 1) // 2, 'L' if k % 2 else 'R')
| 36.333333
| 72
| 0.412844
| 25
| 109
| 1.8
| 0.56
| 0.133333
| 0.133333
| 0.177778
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085366
| 0.247706
| 109
| 2
| 73
| 54.5
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
947c9256fca1d8ebc606d4a9dc7634d3a31aed08
| 20,200
|
py
|
Python
|
test.py
|
kushalkolar/NuSeT
|
cea354634fe432027f5103752f323791b78e3afe
|
[
"MIT"
] | 18
|
2019-10-17T23:47:03.000Z
|
2022-03-28T14:37:02.000Z
|
test.py
|
kushalkolar/NuSeT
|
cea354634fe432027f5103752f323791b78e3afe
|
[
"MIT"
] | 4
|
2020-08-18T20:02:03.000Z
|
2022-02-23T00:16:45.000Z
|
test.py
|
kushalkolar/NuSeT
|
cea354634fe432027f5103752f323791b78e3afe
|
[
"MIT"
] | 8
|
2019-11-22T06:20:57.000Z
|
2021-11-08T20:32:57.000Z
|
import tensorflow as tf
import numpy as np
import csv
from PIL import Image
from tqdm import tqdm
from skimage.transform import rescale
from model_layers.models import UNET
from model_layers.model_RPN import RPN
from model_layers.anchor_size import anchor_size
from model_layers.rpn_target import RPNTarget
from model_layers.rpn_proposal import RPNProposal
from model_layers.rpn_loss import RPNLoss
from model_layers.seg_loss import segmentation_loss
from model_layers.marker_watershed import marker_watershed
from model_layers.compute_metrics import compute_metrics
from utils.load_data import load_data_test
from utils.tf_utils import optimizer_fun
from utils.anchors import generate_anchors_reference
from utils.generate_anchors import generate_anchors
from utils.test import generate_gt_boxes
from utils.normalization import whole_image_norm, foreground_norm, clean_image
from utils.losses import smooth_l1_loss
from utils.image_vis import draw_rpn_bbox_pred, draw_gt_boxes, draw_top_nms_proposals, draw_rpn_bbox_targets, draw_rpn_bbox_pred_only
# inspired from https://github.com/tryolabs/luminoth/blob/master/luminoth/models/fasterrcnn/rpn_test.py
def test(params, self):
"""Predict masks for all images in a given directory, and save them
Args:
params (dict): the parameters of the network
"""
# Get the testing parameters
perform_watershed = params['watershed']
bbox_min_score = params['min_score']
nms_thresh = params['nms_threshold']
postProcess = params['postProcess']
resize_scale = params['scale_ratio']
# Load the data
# x_test, y_test: test images and corresponding labels
x_id, x_test = load_data_test(self.batch_seg_path)
# pred_dict and pred_dict_final save all the temp variables
pred_dict_final = {}
train_initial = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 1])
input_shape = tf.shape(train_initial)
input_height = input_shape[1]
input_width = input_shape[2]
im_shape = tf.cast([input_height, input_width], tf.float32)
# number of classes needed to be classified, for our case this equals to 2
# (foreground and background)
nb_classes = 2
# feed the initial image to U-Net, we expect 2 outputs:
# 1. feat_map of shape (?,hf,wf,1024), which will be passed to the
# region proposal network
# 2. final_logits of shape(?,h,w,2), which is the prediction from U-net
with tf.variable_scope('model_U-Net') as scope:
final_logits, feat_map = UNET(nb_classes, train_initial)
# The final_logits has 2 channels for foreground/background softmax scores,
# then we get prediction with larger score for each pixel
pred_masks = tf.argmax(final_logits, axis=3)
pred_masks = tf.reshape(pred_masks,[input_height,input_width])
pred_masks = tf.to_float(pred_masks)
# Dynamic anchor base size calculated from median cell lengths
base_size = anchor_size(tf.reshape(pred_masks,[input_height,input_width]))
# scales and ratios are used to generate different anchors
scales = np.array([ 0.5, 1, 2])
ratios = np.array([ 0.125, 0.25, 0.5, 1, 2, 4, 8])
# stride is to control how sparse we want to place anchors across the image
# stride = 16 means to place an anchor every 16 pixels on the original image
stride = 16
# Generate the anchor reference with respect to the original image
ref_anchors = generate_anchors_reference(base_size, ratios, scales)
num_ref_anchors = scales.shape[0] * ratios.shape[0]
feat_height = input_height / stride
feat_width = input_width / stride
# Generate all the anchors based on ref_anchors
all_anchors = generate_anchors(ref_anchors, stride, [feat_height,feat_width])
num_anchors = all_anchors.shape[0]
with tf.variable_scope('model_RPN') as scope:
prediction_dict = RPN(feat_map, num_ref_anchors)
# Get the tensors from the dict
rpn_cls_prob = prediction_dict['rpn_cls_prob']
rpn_bbox_pred = prediction_dict['rpn_bbox_pred']
proposal_prediction = RPNProposal(rpn_cls_prob, rpn_bbox_pred, all_anchors, im_shape, nms_thresh)
pred_dict_final['all_anchors'] = tf.cast(all_anchors, tf.float32)
prediction_dict['proposals'] = proposal_prediction['proposals']
prediction_dict['scores'] = proposal_prediction['scores']
pred_dict_final['rpn_prediction'] = prediction_dict
scores = pred_dict_final['rpn_prediction']['scores']
proposals = pred_dict_final['rpn_prediction']['proposals']
pred_masks_watershed = tf.to_float(marker_watershed(scores, proposals, pred_masks, min_score = bbox_min_score))
# start point for testing, and end point for graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
num_batches_test = len(x_test)
saver = tf.train.Saver()
masks1 = []
# Restore the per-image normalization model from the trained network
saver.restore(sess,'./Network/whole_norm.ckpt')
sess.run(tf.local_variables_initializer())
for j in tqdm(range(0,num_batches_test)):
# whole image normalization
batch_data = x_test[j]
batch_data_shape = batch_data.shape
image = np.reshape(batch_data, [batch_data_shape[0],batch_data_shape[1]])
if resize_scale != 1:
image = rescale(image, self.params['scale_ratio'], anti_aliasing=True)
# Clip the height and width to be 16-fold
imheight, imwidth = image.shape
imheight = imheight//16*16
imwidth = imwidth//16*16
image = image[:imheight, :imwidth]
image_normalized_wn = whole_image_norm(image)
image_normalized_wn = np.reshape(image_normalized_wn, [1,imheight,imwidth,1])
masks = sess.run(pred_masks, feed_dict={train_initial:image_normalized_wn})
if not self.usingCL:
self.progress_var.set(j/2/num_batches_test*100)
self.window.update()
# First pass, get the coarse masks, and normalize the image on masks
masks1.append(masks)
# Restore the foreground normalization model from the trained network
saver.restore(sess,'./Network/foreground.ckpt')
sess.run(tf.local_variables_initializer())
for j in tqdm(range(0,num_batches_test)):
batch_data = x_test[j]
batch_data_shape = batch_data.shape
image = np.reshape(batch_data, [batch_data_shape[0],batch_data_shape[1]])
if resize_scale != 1:
image = rescale(image, self.params['scale_ratio'])
# Clip the height and width to be 16-fold
imheight, imwidth = image.shape
imheight = imheight//16*16
imwidth = imwidth//16*16
image = image[:imheight, :imwidth]
# Final pass, foreground normalization to get final masks
image_normalized_fg = foreground_norm(image, masks1[j])
image_normalized_fg = np.reshape(image_normalized_fg, [1,imheight,imwidth,1])
# If adding watershed, we save the watershed masks separately
if perform_watershed == 'yes':
masks_watershed = sess.run(pred_masks_watershed, feed_dict={train_initial:image_normalized_fg})
if postProcess == 'yes':
masks_watershed = clean_image(masks_watershed)
# Revert the scale to original display
if resize_scale != 1:
masks_watershed = rescale(masks_watershed, 1/self.params['scale_ratio'])
I8 = (((masks_watershed - masks_watershed.min()) / (masks_watershed.max() - masks_watershed.min())) * 255).astype(np.uint8)
img = Image.fromarray(I8)
img.save(self.batch_seg_path + x_id[j] + '_masks_watershed.png')
else:
masks = sess.run(pred_masks, feed_dict={train_initial:image_normalized_fg})
if postProcess == 'yes':
masks = clean_image(masks)
# enable these 2 lines if your want to see the detection result
#image_pil = draw_top_nms_proposals(pred_dict, batch_data, min_score=bbox_min_score, draw_gt=False)
#image_pil.save(str(j)+'_pred.png')
# Revert the scale to original display
if resize_scale != 1:
masks = rescale(masks, 1/self.params['scale_ratio'])
I8 = (((masks - masks.min()) / (masks.max() - masks.min())) * 255).astype(np.uint8)
img = Image.fromarray(I8)
img.save(self.batch_seg_path + x_id[j] + '_masks.png')
if not self.usingCL:
self.progress_var.set(50 + j/2/num_batches_test*100)
self.window.update()
sess.close()
# This function is similar to the function above, but only for one image that is
# displayed on NuSeT GUI
def test_single_img(params, x_test):
"""input the image, return the segmented mask
Args:
params (dict): the parameters of the network
x_test: the input image in numpy array
"""
# Get the testing parameters
perform_watershed = params['watershed']
bbox_min_score = params['min_score']
nms_thresh = params['nms_threshold']
postProcess = params['postProcess']
# pred_dict and pred_dict_final save all the temp variables
pred_dict_final = {}
train_initial = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 1])
input_shape = tf.shape(train_initial)
input_height = input_shape[1]
input_width = input_shape[2]
im_shape = tf.cast([input_height, input_width], tf.float32)
# number of classes needed to be classified, for our case this equals to 2
# (foreground and background)
nb_classes = 2
# feed the initial image to U-Net, we expect 2 outputs:
# 1. feat_map of shape (?,32,32,1024), which will be passed to the
# region proposal network
# 2. final_logits of shape(?,512,512,2), which is the prediction from U-net
with tf.variable_scope('model_U-Net') as scope:
final_logits, feat_map = UNET(nb_classes, train_initial)
# The final_logits has 2 channels for foreground/background softmax scores,
# then we get prediction with larger score for each pixel
pred_masks = tf.argmax(final_logits, axis=3)
pred_masks = tf.reshape(pred_masks,[input_height,input_width])
pred_masks = tf.to_float(pred_masks)
# Dynamic anchor base size calculated from median cell lengths
base_size = anchor_size(tf.reshape(pred_masks,[input_height,input_width]))
# scales and ratios are used to generate different anchors
scales = np.array([ 0.5, 1, 2])
ratios = np.array([ 0.125, 0.25, 0.5, 1, 2, 4, 8])
# stride is to control how sparse we want to place anchors across the image
# stride = 16 means to place an anchor every 16 pixels on the original image
stride = 16
# Generate the anchor reference with respect to the original image
ref_anchors = generate_anchors_reference(base_size, ratios, scales)
num_ref_anchors = scales.shape[0] * ratios.shape[0]
feat_height = input_height / stride
feat_width = input_width / stride
# Generate all the anchors based on ref_anchors
all_anchors = generate_anchors(ref_anchors, stride, [feat_height,feat_width])
num_anchors = all_anchors.shape[0]
with tf.variable_scope('model_RPN') as scope:
prediction_dict = RPN(feat_map, num_ref_anchors)
# Get the tensors from the dict
rpn_cls_prob = prediction_dict['rpn_cls_prob']
rpn_bbox_pred = prediction_dict['rpn_bbox_pred']
proposal_prediction = RPNProposal(rpn_cls_prob, rpn_bbox_pred, all_anchors, im_shape, nms_thresh)
pred_dict_final['all_anchors'] = tf.cast(all_anchors, tf.float32)
prediction_dict['proposals'] = proposal_prediction['proposals']
prediction_dict['scores'] = proposal_prediction['scores']
pred_dict_final['rpn_prediction'] = prediction_dict
scores = pred_dict_final['rpn_prediction']['scores']
proposals = pred_dict_final['rpn_prediction']['proposals']
pred_masks_watershed = tf.to_float(marker_watershed(scores, proposals, pred_masks, min_score = bbox_min_score))
# start point for testing, and end point for graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
num_batches_test = len(x_test)
saver = tf.train.Saver()
masks1 = []
# Restore the per-image normalization model from the trained network
saver.restore(sess,'./Network/whole_norm.ckpt')
sess.run(tf.local_variables_initializer())
for j in tqdm(range(0,num_batches_test)):
# whole image normalization
batch_data = x_test[j]
batch_data_shape = batch_data.shape
image_normalized_wn = whole_image_norm(batch_data)
image_normalized_wn = np.reshape(image_normalized_wn, [1,batch_data_shape[0],batch_data_shape[1],1])
masks = sess.run(pred_masks, feed_dict={train_initial:image_normalized_wn})
# First pass, get the coarse masks, and normalize the image on masks
masks1.append(masks)
# Restore the foreground normalization model from the trained network
saver.restore(sess,'./Network/foreground.ckpt')
#saver.restore(sess,'./Network/fg_norm_weights_fluorescent/'+str(30)+'.ckpt')
sess.run(tf.local_variables_initializer())
for j in tqdm(range(0,num_batches_test)):
batch_data = x_test[j]
batch_data_shape = batch_data.shape
image = np.reshape(batch_data, [batch_data_shape[0],batch_data_shape[1]])
# Final pass, foreground normalization to get final masks
image_normalized_fg = foreground_norm(image, masks1[j])
image_normalized_fg = np.reshape(image_normalized_fg, [1,batch_data_shape[0],batch_data_shape[1],1])
# If adding watershed, we save the watershed masks separately
if perform_watershed == 'yes':
masks = sess.run(pred_masks_watershed, feed_dict={train_initial:image_normalized_fg})
if postProcess == 'yes':
masks = clean_image(masks)
else:
masks = sess.run(pred_masks, feed_dict={train_initial:image_normalized_fg})
if postProcess == 'yes':
masks = clean_image(masks)
sess.close()
return masks
def test_UNet(params, self):
"""Predict masks for all images in a given directory, and save them
Args:
params (dict): the parameters of the network
"""
postProcess = params['postProcess']
resize_scale = params['scale_ratio']
# Load the data
# x_test, y_test: test images and corresponding labels
x_id, x_test = load_data_test(self.batch_seg_path)
# pred_dict and pred_dict_final save all the temp variables
pred_dict_final = {}
train_initial = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 1])
input_shape = tf.shape(train_initial)
input_height = input_shape[1]
input_width = input_shape[2]
im_shape = tf.cast([input_height, input_width], tf.float32)
# number of classes needed to be classified, for our case this equals to 2
# (foreground and background)
nb_classes = 2
# feed the initial image to U-Net, we expect 2 outputs:
# 1. feat_map of shape (?,hf,wf,1024), which will be passed to the
# region proposal network
# 2. final_logits of shape(?,h,w,2), which is the prediction from U-net
with tf.variable_scope('model_U-Net') as scope:
final_logits, feat_map = UNET(nb_classes, train_initial)
# The final_logits has 2 channels for foreground/background softmax scores,
# then we get prediction with larger score for each pixel
pred_masks = tf.argmax(final_logits, axis=3)
pred_masks = tf.reshape(pred_masks,[input_height,input_width])
pred_masks = tf.to_float(pred_masks)
# start point for testing, and end point for graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
num_batches_test = len(x_test)
saver = tf.train.Saver()
# Restore the per-image normalization model from the trained network
saver.restore(sess,'./Network/UNet.ckpt')
sess.run(tf.local_variables_initializer())
for j in tqdm(range(0,num_batches_test)):
# whole image normalization
batch_data = x_test[j]
batch_data_shape = batch_data.shape
image = np.reshape(batch_data, [batch_data_shape[0],batch_data_shape[1]])
if resize_scale != 1:
image = rescale(image, self.params['scale_ratio'], anti_aliasing=True)
# Clip the height and width to be 16-fold
imheight, imwidth = image.shape
imheight = imheight//16*16
imwidth = imwidth//16*16
image = image[:imheight, :imwidth]
image_normalized_wn = whole_image_norm(image)
image_normalized_wn = np.reshape(image_normalized_wn, [1,imheight,imwidth,1])
masks = sess.run(pred_masks, feed_dict={train_initial:image_normalized_wn})
if not self.usingCL:
self.progress_var.set(j/num_batches_test*100)
self.window.update()
if postProcess == 'yes':
masks = clean_image(masks)
# Revert the scale to original display
if resize_scale != 1:
masks = rescale(masks, 1/self.params['scale_ratio'])
I8 = (((masks - masks.min()) / (masks.max() - masks.min())) * 255).astype(np.uint8)
img = Image.fromarray(I8)
img.save(self.batch_seg_path + x_id[j] + '_masks.png')
sess.close()
# This function is similar to the function above, but only for one image that is
# displayed on NuSeT GUI
def test_single_img_UNet(params, x_test):
    """input the image, return the segmented mask

    Builds the U-Net graph, restores the trained weights from
    ./Network/UNet.ckpt, applies whole-image normalization, and runs a
    single forward pass to obtain the predicted mask.

    Args:
        params (dict): the parameters of the network; only 'postProcess'
            is read here ('yes' enables clean_image post-processing)
        x_test: the input image in numpy array

    Returns:
        The predicted (optionally post-processed) mask of the last
        processed entry in x_test.
    """
    # Get the testing parameters
    postProcess = params['postProcess']
    # pred_dict and pred_dict_final save all the temp variables
    pred_dict_final = {}
    # Placeholder for a single-image batch: (1, height, width, 1)
    train_initial = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 1])
    input_shape = tf.shape(train_initial)
    input_height = input_shape[1]
    input_width = input_shape[2]
    # NOTE(review): im_shape is computed but never used in this function
    im_shape = tf.cast([input_height, input_width], tf.float32)
    # number of classes needed to be classified, for our case this equals to 2
    # (foreground and background)
    nb_classes = 2
    # feed the initial image to U-Net, we expect 2 outputs:
    # 1. feat_map of shape (?,32,32,1024), which will be passed to the
    # region proposal network
    # 2. final_logits of shape(?,512,512,2), which is the prediction from U-net
    with tf.variable_scope('model_U-Net') as scope:
        final_logits, feat_map = UNET(nb_classes, train_initial)
    # The final_logits has 2 channels for foreground/background softmax scores,
    # then we get prediction with larger score for each pixel
    pred_masks = tf.argmax(final_logits, axis=3)
    pred_masks = tf.reshape(pred_masks, [input_height, input_width])
    pred_masks = tf.to_float(pred_masks)
    # start point for testing, and end point for graph
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    num_batches_test = len(x_test)
    saver = tf.train.Saver()
    # NOTE(review): masks1 is never appended to or read in this function
    masks1 = []
    # Restore the per-image normalization model from the trained network
    saver.restore(sess, './Network/UNet.ckpt')
    sess.run(tf.local_variables_initializer())
    # NOTE(review): despite the "single img" name this iterates over every
    # entry of x_test and only the final iteration's mask is returned; an
    # empty x_test would leave `masks` unassigned and the return would
    # raise NameError — confirm callers always pass exactly one image.
    for j in tqdm(range(0, num_batches_test)):
        # whole image normalization
        batch_data = x_test[j]
        batch_data_shape = batch_data.shape
        image_normalized_wn = whole_image_norm(batch_data)
        image_normalized_wn = np.reshape(image_normalized_wn, [1, batch_data_shape[0], batch_data_shape[1], 1])
        masks = sess.run(pred_masks, feed_dict={train_initial: image_normalized_wn})
        if postProcess == 'yes':
            masks = clean_image(masks)
    sess.close()
    return masks
| 38.697318
| 135
| 0.683663
| 2,822
| 20,200
| 4.665131
| 0.106662
| 0.026662
| 0.027649
| 0.015951
| 0.899658
| 0.898139
| 0.898139
| 0.890771
| 0.888188
| 0.882947
| 0
| 0.017212
| 0.226287
| 20,200
| 521
| 136
| 38.771593
| 0.825133
| 0.271238
| 0
| 0.856618
| 0
| 0
| 0.048402
| 0.006875
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.084559
| 0
| 0.106618
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
846a6de367f961a87b8b506bb0aad795c93373d5
| 331
|
py
|
Python
|
authentication/forms.py
|
darkcloudb/DJANGO-twitterclone
|
1c6c05ce09fa8f780b35927a6c28d3dcdf74e640
|
[
"MIT"
] | null | null | null |
authentication/forms.py
|
darkcloudb/DJANGO-twitterclone
|
1c6c05ce09fa8f780b35927a6c28d3dcdf74e640
|
[
"MIT"
] | null | null | null |
authentication/forms.py
|
darkcloudb/DJANGO-twitterclone
|
1c6c05ce09fa8f780b35927a6c28d3dcdf74e640
|
[
"MIT"
] | null | null | null |
from django import forms
class LoginForm(forms.Form):
    """Form for authenticating an existing user (username + password)."""
    username = forms.CharField(max_length=30)
    # PasswordInput renders the field as a masked password input
    password = forms.CharField(widget=forms.PasswordInput)
class SignUpForm(forms.Form):
    """Form for registering a new user (username + password).

    Mirrors LoginForm's fields; an email field was sketched but is
    currently disabled.
    """
    username = forms.CharField(max_length=30)
    # PasswordInput renders the field as a masked password input
    password = forms.CharField(widget=forms.PasswordInput)
    # email = forms.EmailField()
| 25.461538
| 58
| 0.749245
| 39
| 331
| 6.307692
| 0.461538
| 0.227642
| 0.138211
| 0.178862
| 0.715447
| 0.715447
| 0.715447
| 0.715447
| 0.715447
| 0.715447
| 0
| 0.014134
| 0.145015
| 331
| 12
| 59
| 27.583333
| 0.855124
| 0.07855
| 0
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.285714
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
84afab2f163d6f2d83a9d7686cf213e1c2dac5c2
| 114,271
|
py
|
Python
|
kmip/tests/unit/core/objects/test_objects.py
|
openstack/deb-python-kmip
|
f86134878b5f558b39f51e67a6e6ba5a0b03e222
|
[
"Apache-2.0"
] | 12
|
2016-09-14T21:59:10.000Z
|
2020-03-11T07:37:25.000Z
|
kmip/tests/unit/core/objects/test_objects.py
|
openstack/deb-python-kmip
|
f86134878b5f558b39f51e67a6e6ba5a0b03e222
|
[
"Apache-2.0"
] | null | null | null |
kmip/tests/unit/core/objects/test_objects.py
|
openstack/deb-python-kmip
|
f86134878b5f558b39f51e67a6e6ba5a0b03e222
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import string_types
import testtools
from testtools import TestCase
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import AttributeType
from kmip.core.enums import BlockCipherMode
from kmip.core.enums import HashingAlgorithm as HashingAlgorithmEnum
from kmip.core.enums import KeyRoleType
from kmip.core.enums import PaddingMethod
from kmip.core.enums import Tags
from kmip.core.factories.attributes import AttributeValueFactory
from kmip.core import objects
from kmip.core.objects import Attribute
from kmip.core.objects import ExtensionName
from kmip.core.objects import ExtensionTag
from kmip.core.objects import ExtensionType
from kmip.core.objects import KeyMaterialStruct
from kmip.core import utils
from kmip.core.utils import BytearrayStream
class TestAttributeClass(TestCase):
    """
    A test suite for the Attribute class.

    Builds four Attribute fixtures that differ from attributeObj_a in
    exactly one field (name, value, or index) and verifies TTLV
    (de)serialization against a known-good byte encoding plus the
    equality operators.
    """
    def setUp(self):
        super(TestAttributeClass, self).setUp()
        name_a = 'CRYPTOGRAPHIC PARAMETERS'
        name_b = 'CRYPTOGRAPHIC ALGORITHM'
        self.attribute_name_a = Attribute.AttributeName(name_a)
        self.attribute_name_b = Attribute.AttributeName(name_b)
        self.factory = AttributeValueFactory()
        # Two CryptographicParameters values differing only in the block
        # cipher mode (CBC vs CCM)
        self.attribute_value_a = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CBC,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})
        self.attribute_value_b = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CCM,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})
        index_a = 2
        index_b = 3
        self.attribute_index_a = Attribute.AttributeIndex(index_a)
        self.attribute_index_b = Attribute.AttributeIndex(index_b)
        # attributeObj_a is the reference fixture; b, c, d each vary one
        # field of it (name, value, index respectively)
        self.attributeObj_a = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)
        self.attributeObj_b = Attribute(
            attribute_name=self.attribute_name_b,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)
        self.attributeObj_c = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_b,
            attribute_index=self.attribute_index_a)
        self.attributeObj_d = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_b)
        # Known-good TTLV encoding matching attributeObj_a, used by both
        # test_read and test_write
        self.key_req_with_crypt_params = BytearrayStream((
            b'\x42\x00\x08\x01\x00\x00\x00\x78\x42\x00\x0a\x07\x00\x00\x00\x18'
            b'\x43\x52\x59\x50\x54\x4f\x47\x52\x41\x50\x48\x49\x43\x20\x50\x41'
            b'\x52\x41\x4d\x45\x54\x45\x52\x53'
            b'\x42\x00\x09\x02\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x0b\x01\x00\x00\x00\x40'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x5f\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
            b'\x42\x00\x38\x05\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00'
            b'\x42\x00\x83\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        ))

    def tearDown(self):
        super(TestAttributeClass, self).tearDown()

    def test_read(self):
        # Decoding the reference encoding must reproduce attributeObj_a
        attrObj = Attribute()
        attrObj.read(self.key_req_with_crypt_params)
        self.assertEqual(self.attributeObj_a, attrObj)

    def test_write(self):
        # NOTE(review): positional order here is (name, index, value),
        # while setUp uses keywords (name, value, index) — confirm this
        # matches Attribute.__init__'s positional parameter order.
        attrObj = Attribute(self.attribute_name_a, self.attribute_index_a,
                            self.attribute_value_a)
        ostream = BytearrayStream()
        attrObj.write(ostream)
        self.assertEqual(self.key_req_with_crypt_params, ostream)

    def test_equal_on_equal(self):
        # NOTE(review): despite the name, this asserts that fixtures
        # differing in exactly one field compare unequal.
        self.assertFalse(self.attributeObj_a == self.attributeObj_b)
        self.assertFalse(self.attributeObj_a == self.attributeObj_c)
        self.assertFalse(self.attributeObj_a == self.attributeObj_d)

    def test_not_equal_on_not_equal(self):
        # Fixtures with different names must compare as not-equal
        self.assertTrue(self.attributeObj_a != self.attributeObj_b)
class TestKeyMaterialStruct(TestCase):
    """
    Placeholder test suite covering the deprecated KeyMaterialStruct.

    Delete this suite once KeyMaterialStruct itself is dropped from the
    code base.
    """

    def setUp(self):
        super(TestKeyMaterialStruct, self).setUp()

    def tearDown(self):
        super(TestKeyMaterialStruct, self).tearDown()

    def test_valid_tag(self):
        """
        Verify that a freshly constructed KeyMaterialStruct carries the
        KEY_MATERIAL tag.
        """
        key_material = KeyMaterialStruct()
        self.assertEqual(Tags.KEY_MATERIAL, key_material.tag)
class TestExtensionName(TestCase):
    """
    Tests for the ExtensionName class.

    ExtensionName is a thin wrapper around the TextString primitive, so
    only construction behavior needs coverage here.
    """

    def setUp(self):
        super(TestExtensionName, self).setUp()

    def tearDown(self):
        super(TestExtensionName, self).tearDown()

    def _test_init(self, value):
        # Non-string, non-None input must be rejected at construction time.
        if value is not None and not isinstance(value, string_types):
            self.assertRaises(TypeError, ExtensionName, value)
            return
        extension_name = ExtensionName(value)
        # A None value is expected to default to the empty string.
        expected = '' if value is None else value
        failure_message = "expected {0}, observed {1}".format(
            expected, extension_name.value)
        self.assertEqual(expected, extension_name.value, failure_message)

    def test_init_with_none(self):
        """
        Constructing an ExtensionName with no value should succeed and
        default to the empty string.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Constructing an ExtensionName from a string should store that
        string as its value.
        """
        self._test_init("valid")

    def test_init_with_invalid(self):
        """
        Constructing an ExtensionName from a non-string should raise a
        TypeError.
        """
        self._test_init(0)
class TestExtensionTag(TestCase):
    """
    Tests for the ExtensionTag class.

    ExtensionTag is a thin wrapper around the Integer primitive, so only
    construction behavior needs coverage here.
    """

    def setUp(self):
        super(TestExtensionTag, self).setUp()

    def tearDown(self):
        super(TestExtensionTag, self).tearDown()

    def _test_init(self, value):
        # Non-integer, non-None input must be rejected at construction time.
        if value is not None and not isinstance(value, int):
            self.assertRaises(TypeError, ExtensionTag, value)
            return
        extension_tag = ExtensionTag(value)
        # A None value is expected to default to 0.
        expected = 0 if value is None else value
        failure_message = "expected {0}, observed {1}".format(
            expected, extension_tag.value)
        self.assertEqual(expected, extension_tag.value, failure_message)

    def test_init_with_none(self):
        """
        Constructing an ExtensionTag with no value should succeed and
        default to 0.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Constructing an ExtensionTag from an integer should store that
        integer as its value.
        """
        self._test_init(0)

    def test_init_with_invalid(self):
        """
        Constructing an ExtensionTag from a non-integer should raise a
        TypeError.
        """
        self._test_init("invalid")
class TestExtensionType(TestCase):
    """
    Tests for the ExtensionType class.

    ExtensionType is a thin wrapper around the Integer primitive, so only
    construction behavior needs coverage here.
    """

    def setUp(self):
        super(TestExtensionType, self).setUp()

    def tearDown(self):
        super(TestExtensionType, self).tearDown()

    def _test_init(self, value):
        # Non-integer, non-None input must be rejected at construction time.
        if value is not None and not isinstance(value, int):
            self.assertRaises(TypeError, ExtensionType, value)
            return
        extension_type = ExtensionType(value)
        # A None value is expected to default to 0.
        expected = 0 if value is None else value
        failure_message = "expected {0}, observed {1}".format(
            expected, extension_type.value)
        self.assertEqual(expected, extension_type.value, failure_message)

    def test_init_with_none(self):
        """
        Constructing an ExtensionType with no value should succeed and
        default to 0.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Constructing an ExtensionType from an integer should store that
        integer as its value.
        """
        self._test_init(0)

    def test_init_with_invalid(self):
        """
        Constructing an ExtensionType from a non-integer should raise a
        TypeError.
        """
        self._test_init("invalid")
class TestEncryptionKeyInformation(testtools.TestCase):
    """
    Test suite for the EncryptionKeyInformation struct.

    Covers construction, attribute validation, TTLV encoding/decoding
    against known-good byte streams, the equality operators, and the
    repr/str output of the struct.
    """
    def setUp(self):
        super(TestEncryptionKeyInformation, self).setUp()
        # Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
        #
        # This encoding matches the following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        # Cryptographic Parameters
        #     Block Cipher Mode - NIST_KEY_WRAP
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )
        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
        )
        # Struct header only, with zero payload length; used to exercise
        # the missing-required-field error paths
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x00'
        )

    def tearDown(self):
        super(TestEncryptionKeyInformation, self).tearDown()

    def test_init(self):
        """
        Test that an EncryptionKeyInformation struct can be constructed with
        no arguments.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )

    def test_init_with_args(self):
        """
        Test that an EncryptionKeyInformation struct can be constructed with
        valid values.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CTR)
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=cryptographic_parameters
        )
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            encryption_key_information.unique_identifier
        )
        self.assertIsInstance(
            encryption_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        parameters = encryption_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            parameters.block_cipher_mode
        )

    def test_invalid_unique_identifier(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the unique identifier of an EncryptionKeyInformation struct.
        """
        # Validation must trigger both at construction and on attribute
        # assignment (via setattr)
        kwargs = {'unique_identifier': 0}
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            objects.EncryptionKeyInformation,
            **kwargs
        )
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (encryption_key_information, 'unique_identifier', 0)
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            setattr,
            *args
        )

    def test_invalid_cryptographic_parameters(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the cryptographic parameters of an EncryptionKeyInformation struct.
        """
        # Validation must trigger both at construction and on attribute
        # assignment (via setattr)
        kwargs = {'cryptographic_parameters': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            objects.EncryptionKeyInformation,
            **kwargs
        )
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (
            encryption_key_information,
            'cryptographic_parameters',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            setattr,
            *args
        )

    def test_read(self):
        """
        Test that an EncryptionKeyInformation struct can be read from a data
        stream.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )
        encryption_key_information.read(self.full_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            encryption_key_information.unique_identifier
        )
        self.assertIsInstance(
            encryption_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        cryptographic_parameters = \
            encryption_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            cryptographic_parameters.block_cipher_mode
        )

    def test_read_partial(self):
        """
        Test that an EncryptionKeyInformation struct can be read from a partial
        data stream.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )
        encryption_key_information.read(self.partial_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            encryption_key_information.unique_identifier
        )
        # The partial encoding omits cryptographic parameters entirely,
        # so the attribute stays None after the read
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )

    def test_read_invalid(self):
        """
        Test that a ValueError gets raised when a required
        EncryptionKeyInformation field is missing from the struct encoding.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (self.empty_encoding,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            encryption_key_information.read,
            *args
        )

    def test_write(self):
        """
        Test that an EncryptionKeyInformation struct can be written to a data
        stream.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
        )
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        stream = BytearrayStream()
        encryption_key_information.write(stream)
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))

    def test_write_partial(self):
        """
        Test that a partially defined EncryptionKeyInformation struct can be
        written to a data stream.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        stream = BytearrayStream()
        encryption_key_information.write(stream)
        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))

    def test_write_invalid(self):
        """
        Test that a ValueError gets raised when a required
        EncryptionKeyInformation field is missing when encoding the struct.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        stream = utils.BytearrayStream()
        args = (stream,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            encryption_key_information.write,
            *args
        )

    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        EncryptionKeyInformation structs with the same data.
        """
        # Symmetry is checked in both directions for empty and fully
        # populated structs
        a = objects.EncryptionKeyInformation()
        b = objects.EncryptionKeyInformation()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_not_equal_unique_identifier(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different unique identifiers.
        """
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different types.
        """
        a = objects.EncryptionKeyInformation()
        b = 'invalid'
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        EncryptionKeyInformation structs with the same data.
        """
        a = objects.EncryptionKeyInformation()
        b = objects.EncryptionKeyInformation()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_not_equal_unique_identifier(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different unique identifiers.
        """
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different types.
        """
        a = objects.EncryptionKeyInformation()
        b = 'invalid'
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_repr(self):
        """
        Test that repr can be applied to an EncryptionKeyInformation struct.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        # The expected repr spells out every CryptographicParameters
        # field, including the unset (None) ones
        expected = (
            "EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None))"
        )
        observed = repr(encryption_key_information)
        self.assertEqual(expected, observed)

    def test_str(self):
        """
        Test that str can be applied to an EncryptionKeyInformation struct.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CBC
        )
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        # str output is expected to match the str of a plain dict of the
        # struct's two attributes
        expected = str({
            'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
            'cryptographic_parameters': cryptographic_parameters
        })
        observed = str(encryption_key_information)
        self.assertEqual(expected, observed)
class TestMACSignatureKeyInformation(testtools.TestCase):
"""
Test suite for the MACSignatureKeyInformation struct.
"""
def setUp(self):
super(TestMACSignatureKeyInformation, self).setUp()
# Encoding obtained in part from the KMIP 1.1 testing document,
# Section 14.1. The rest of the encoding was built by hand.
#
# This encoding matches the following set of values:
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
self.full_encoding = BytearrayStream(
b'\x42\x00\x4E\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
)
# Adapted from the full encoding above. This encoding matches the
# following set of values:
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
self.partial_encoding = BytearrayStream(
b'\x42\x00\x4E\x01\x00\x00\x00\x30'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
)
self.empty_encoding = BytearrayStream(
b'\x42\x00\x4E\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestMACSignatureKeyInformation, self).tearDown()
def test_init(self):
"""
Test that a MACSignatureKeyInformation struct can be constructed with
no arguments.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation()
self.assertEqual(
None,
mac_signature_key_information.unique_identifier
)
self.assertEqual(
None,
mac_signature_key_information.cryptographic_parameters
)
def test_init_with_args(self):
"""
Test that a MACSignatureKeyInformation struct can be constructed with
valid values.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CTR)
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444",
cryptographic_parameters=cryptographic_parameters
)
self.assertEqual(
"00000000-1111-2222-3333-444444444444",
mac_signature_key_information.unique_identifier
)
self.assertIsInstance(
mac_signature_key_information.cryptographic_parameters,
attributes.CryptographicParameters
)
parameters = mac_signature_key_information.cryptographic_parameters
self.assertEqual(
enums.BlockCipherMode.CTR,
parameters.block_cipher_mode
)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a MACSignatureKeyInformation struct.
"""
kwargs = {'unique_identifier': 0}
self.assertRaisesRegexp(
TypeError,
"Unique identifier must be a string.",
objects.MACSignatureKeyInformation,
**kwargs
)
args = (objects.MACSignatureKeyInformation(), 'unique_identifier', 0)
self.assertRaisesRegexp(
TypeError,
"Unique identifier must be a string.",
setattr,
*args
)
def test_invalid_cryptographic_parameters(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the cryptographic parameters of a MACSignatureKeyInformation struct.
"""
kwargs = {'cryptographic_parameters': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Cryptographic parameters must be a CryptographicParameters "
"struct.",
objects.MACSignatureKeyInformation,
**kwargs
)
args = (
objects.MACSignatureKeyInformation(),
'cryptographic_parameters',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Cryptographic parameters must be a CryptographicParameters "
"struct.",
setattr,
*args
)
def test_read(self):
"""
Test that a MACSignatureKeyInformation struct can be read from a data
stream.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation()
self.assertEqual(
None,
mac_signature_key_information.unique_identifier
)
self.assertEqual(
None,
mac_signature_key_information.cryptographic_parameters
)
mac_signature_key_information.read(self.full_encoding)
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
mac_signature_key_information.unique_identifier
)
self.assertIsInstance(
mac_signature_key_information.cryptographic_parameters,
attributes.CryptographicParameters
)
cryptographic_parameters = \
mac_signature_key_information.cryptographic_parameters
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
cryptographic_parameters.block_cipher_mode
)
def test_read_partial(self):
"""
Test that a MACSignatureKeyInformation struct can be read from a
partial data stream.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation()
self.assertEqual(
None,
mac_signature_key_information.unique_identifier
)
self.assertEqual(
None,
mac_signature_key_information.cryptographic_parameters
)
mac_signature_key_information.read(self.partial_encoding)
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
mac_signature_key_information.unique_identifier
)
self.assertEqual(
None,
mac_signature_key_information.cryptographic_parameters
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required
MACSignatureKeyInformation field is missing from the struct encoding.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the unique identifier attribute.",
mac_signature_key_information.read,
*args
)
def test_write(self):
"""
Test that a MACSignatureKeyInformation struct can be written to a data
stream.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=cryptographic_parameters
)
stream = BytearrayStream()
mac_signature_key_information.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined MACSignatureKeyInformation struct can be
written to a data stream.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
stream = BytearrayStream()
mac_signature_key_information.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required
MACSignatureKeyInformation field is missing when encoding the struct.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the unique identifier attribute.",
mac_signature_key_information.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
MACSignatureKeyInformation structs with the same data.
"""
a = objects.MACSignatureKeyInformation()
b = objects.MACSignatureKeyInformation()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_unique_identifier(self):
"""
Test that the equality operator returns False when comparing two
MACSignatureKeyInformation structs with different unique identifiers.
"""
a = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
b = objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444"
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the equality operator returns False when comparing two
MACSignatureKeyInformation structs with different cryptographic
parameters.
"""
a = objects.MACSignatureKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.MACSignatureKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.GCM
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
MACSignatureKeyInformation structs with different types.
"""
a = objects.MACSignatureKeyInformation()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
MACSignatureKeyInformation structs with the same data.
"""
a = objects.MACSignatureKeyInformation()
b = objects.MACSignatureKeyInformation()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_unique_identifier(self):
"""
Test that the inequality operator returns True when comparing two
MACSignatureKeyInformation structs with different unique identifiers.
"""
a = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
b = objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444"
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the inequality operator returns True when comparing two
MACSignatureKeyInformation structs with different cryptographic
parameters.
"""
a = objects.MACSignatureKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.MACSignatureKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.GCM
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
MACSignatureKeyInformation structs with different types.
"""
a = objects.MACSignatureKeyInformation()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_repr(self):
"""
Test that repr can be applied to an MACSignatureKeyInformation struct.
"""
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
expected = (
"MACSignatureKeyInformation("
"unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
"cryptographic_parameters=CryptographicParameters("
"block_cipher_mode=BlockCipherMode.CBC, "
"padding_method=None, "
"hashing_algorithm=None, "
"key_role_type=None, "
"digital_signature_algorithm=None, "
"cryptographic_algorithm=None, "
"random_iv=None, "
"iv_length=None, "
"tag_length=None, "
"fixed_field_length=None, "
"invocation_field_length=None, "
"counter_length=None, "
"initial_counter_value=None))"
)
observed = repr(mac_signature_key_information)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to a MACSignatureKeyInformation struct.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=cryptographic_parameters
)
expected = str({
'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
'cryptographic_parameters': cryptographic_parameters
})
observed = str(mac_signature_key_information)
self.assertEqual(expected, observed)
class TestKeyWrappingData(testtools.TestCase):
"""
Test suite for the KeyWrappingData struct.
"""
    def setUp(self):
        """Build the reference TTLV encodings used by the KeyWrappingData
        tests: a full encoding, a partial encoding, and an empty encoding.
        """
        super(TestKeyWrappingData, self).setUp()
        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Sections 14.1. The rest was built by hand.
        #
        # This encoding matches the following set of values:
        #
        # Wrapping Method - ENCRYPT
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature - 0x0123456789ABCDEF
        # IV/Counter/Nonce - 0x01
        # Encoding Option - NO_ENCODING
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\xE0'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4D\x08\x00\x00\x00\x08\x01\x23\x45\x67\x89\xAB\xCD\xEF'
            b'\x42\x00\x3D\x08\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
        # This encoding matches the following set of values:
        #
        # Wrapping Method - ENCRYPT
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # Encoding Option - NO_ENCODING
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\x70'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # A KeyWrappingData structure with no fields at all (zero length).
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\x00'
        )
    def tearDown(self):
        """Clean up after the test case; defer to the parent class."""
        super(TestKeyWrappingData, self).tearDown()
def test_init(self):
"""
Test that a KeyWrappingData struct can be constructed with no
arguments.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
def test_init_with_args(self):
"""
Test that a KeyWrappingData struct can be constructed with valid
values.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="12345678-9012-3456-7890-123456789012",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CTR
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01',
iv_counter_nonce=b'\x02',
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"12345678-9012-3456-7890-123456789012",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.CTR,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_data.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_data.mac_signature_key_information
self.assertEqual(
"00000000-1111-2222-3333-444444444444",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertEqual(b'\x01', key_wrapping_data.mac_signature)
self.assertEqual(b'\x02', key_wrapping_data.iv_counter_nonce)
self.assertEqual(
enums.EncodingOption.TTLV_ENCODING,
key_wrapping_data.encoding_option
)
def test_invalid_wrapping_method(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the wrapping method of a KeyWrappingData struct.
"""
kwargs = {'wrapping_method': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
objects.KeyWrappingData,
**kwargs
)
args = (objects.KeyWrappingData(), 'wrapping_method', 0)
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
setattr,
*args
)
def test_invalid_encryption_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encryption key information of a KeyWrappingData struct.
"""
kwargs = {'encryption_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'encryption_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
setattr,
*args
)
def test_invalid_mac_signature_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature key information of a KeyWrappingData struct.
"""
kwargs = {'mac_signature_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'mac_signature_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
setattr,
*args
)
def test_invalid_mac_signature(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature of a KeyWrappingData struct.
"""
kwargs = {'mac_signature': 0}
self.assertRaisesRegexp(
TypeError,
"MAC/signature must be bytes.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'mac_signature',
0
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature must be bytes.",
setattr,
*args
)
def test_invalid_iv_counter_nonce(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the IV/counter/nonce of a KeyWrappingData struct.
"""
kwargs = {'iv_counter_nonce': 0}
self.assertRaisesRegexp(
TypeError,
"IV/counter/nonce must be bytes.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'iv_counter_nonce',
0
)
self.assertRaisesRegexp(
TypeError,
"IV/counter/nonce must be bytes.",
setattr,
*args
)
def test_invalid_encoding_option(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encoding option of a KeyWrappingData struct.
"""
kwargs = {'encoding_option': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'encoding_option',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
setattr,
*args
)
def test_read(self):
"""
Test that a KeyWrappingData struct can be read from a data stream.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
key_wrapping_data.read(self.full_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_data.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_data.mac_signature_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertEqual(
b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
key_wrapping_data.mac_signature
)
self.assertEqual(
b'\x01',
key_wrapping_data.iv_counter_nonce
)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_data.encoding_option
)
def test_read_partial(self):
"""
Test that a KeyWrappingData struct can be read from a partial data
stream.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
key_wrapping_data.read(self.partial_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsNone(key_wrapping_data.mac_signature_key_information)
self.assertIsNone(key_wrapping_data.mac_signature)
self.assertIsNone(key_wrapping_data.iv_counter_nonce)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_data.encoding_option
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required KeyWrappingData
field is missing from the struct encoding.
"""
key_wrapping_data = objects.KeyWrappingData()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_data.read,
*args
)
def test_write(self):
"""
Test that a KeyWrappingData struct can be written to a data stream.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_data.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined KeyWrappingData struct can be written to
a data stream.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_data.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required KeyWrappingData
field is missing when encoding the struct.
"""
key_wrapping_data = objects.KeyWrappingData()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_data.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
KeyWrappingData structs with the same data.
"""
a = objects.KeyWrappingData()
b = objects.KeyWrappingData()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_wrapping_method(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different wrapping methods.
"""
a = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encryption_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different encryption key information.
"""
a = objects.KeyWrappingData(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingData(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different MAC/signature key information.
"""
a = objects.KeyWrappingData(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingData(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_mac_signatures(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different MAC/signatures.
"""
a = objects.KeyWrappingData(mac_signature=b'\x01')
b = objects.KeyWrappingData(mac_signature=b'\x10')
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_iv_counter_nonce(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different IV/counter/nonces.
"""
a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encoding_option(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different encoding options.
"""
a = objects.KeyWrappingData(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingData(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different types.
"""
a = objects.KeyWrappingData()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
KeyWrappingData structs with the same data.
"""
a = objects.KeyWrappingData()
b = objects.KeyWrappingData()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_wrapping_method(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different wrapping methods.
"""
a = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encryption_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different encryption key information.
"""
a = objects.KeyWrappingData(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingData(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different MAC/signature key information.
"""
a = objects.KeyWrappingData(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingData(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_mac_signatures(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different MAC/signatures.
"""
a = objects.KeyWrappingData(mac_signature=b'\x01')
b = objects.KeyWrappingData(mac_signature=b'\x10')
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_iv_counter_nonce(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different IV/counter/nonces.
"""
a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encoding_option(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different encoding options.
"""
a = objects.KeyWrappingData(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingData(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingData structs with different types.
"""
a = objects.KeyWrappingData()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_repr(self):
        """
        Test that repr can be applied to a KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The expected repr spells out every nested CryptographicParameters
        # field, including those left unset (None), since the struct's repr
        # always includes all attributes.
        expected = (
            "KeyWrappingData("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            # Format the byte fields dynamically so the expected string uses
            # the interpreter's own bytes repr (differs between Python 2/3).
            "mac_signature={0}, "
            "iv_counter_nonce={1}, "
            "encoding_option=EncodingOption.TTLV_ENCODING)".format(
                b'\x01\x01\x02\x02\x03\x03\x04\x04',
                b'\xFF'
            )
        )
        observed = repr(key_wrapping_data)
        self.assertEqual(expected, observed)
    def test_str(self):
        """
        Test that str can be applied to a KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The struct's __str__ is expected to match the str of a dict
        # holding the same attribute name/value pairs.
        expected = str({
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            'mac_signature_key_information':
                objects.MACSignatureKeyInformation(
                    unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                    cryptographic_parameters=attributes.CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.CBC
                    )
                ),
            'mac_signature': b'\x01\x01\x02\x02\x03\x03\x04\x04',
            'iv_counter_nonce': b'\xFF',
            'encoding_option': enums.EncodingOption.TTLV_ENCODING
        })
        observed = str(key_wrapping_data)
        self.assertEqual(expected, observed)
class TestKeyWrappingSpecification(testtools.TestCase):
    """
    Test suite for the KeyWrappingSpecification struct.
    """
    def setUp(self):
        super(TestKeyWrappingSpecification, self).setUp()
        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Sections 14.1 and 14.2. The rest was built by hand.
        #
        # This TTLV encoding matches the following set of values:
        #
        # Wrapping Method - Encrypt
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # Attribute Names
        #     Cryptographic Usage Mask
        # Encoding Option - NO_ENCODING
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\xE0'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x0A\x07\x00\x00\x00\x18'
            b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
            b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )
        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        #
        # Wrapping Method - Encrypt
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\x60'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )
        # A KeyWrappingSpecification structure header with a zero-length
        # payload; used to exercise the missing-field error paths.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\x00'
        )
    def tearDown(self):
        super(TestKeyWrappingSpecification, self).tearDown()
def test_init(self):
"""
Test that a KeyWrappingSpecification struct can be constructed with
no arguments.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
self.assertEqual(None, key_wrapping_specification.wrapping_method)
self.assertEqual(
None,
key_wrapping_specification.encryption_key_information
)
self.assertEqual(
None,
key_wrapping_specification.mac_signature_key_information
)
self.assertEqual(None, key_wrapping_specification.attribute_names)
self.assertEqual(None, key_wrapping_specification.encoding_option)
    def test_init_with_args(self):
        """
        Test that a KeyWrappingSpecification struct can be constructed with
        valid values.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="12345678-9012-3456-7890-123456789012",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CTR
            )
        )
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
            )
        )
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=encryption_key_information,
            mac_signature_key_information=mac_signature_key_information,
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length',
                'Cryptographic Usage Mask'
            ],
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        # Verify the encryption key information substructure field-by-field.
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "12345678-9012-3456-7890-123456789012",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            e.cryptographic_parameters.block_cipher_mode
        )
        # Verify the MAC/signature key information substructure.
        self.assertIsInstance(
            key_wrapping_specification.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_specification.mac_signature_key_information
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        # Verify the attribute names are preserved in order.
        self.assertIsInstance(
            key_wrapping_specification.attribute_names,
            list
        )
        self.assertEqual(3, len(key_wrapping_specification.attribute_names))
        self.assertEqual(
            'Cryptographic Algorithm',
            key_wrapping_specification.attribute_names[0]
        )
        self.assertEqual(
            'Cryptographic Length',
            key_wrapping_specification.attribute_names[1]
        )
        self.assertEqual(
            'Cryptographic Usage Mask',
            key_wrapping_specification.attribute_names[2]
        )
        self.assertEqual(
            enums.EncodingOption.TTLV_ENCODING,
            key_wrapping_specification.encoding_option
        )
def test_invalid_wrapping_method(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the wrapping method of a KeyWrappingSpecification struct.
"""
kwargs = {'wrapping_method': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (objects.KeyWrappingSpecification(), 'wrapping_method', 0)
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
setattr,
*args
)
def test_invalid_encryption_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encryption key information of a KeyWrappingSpecification struct.
"""
kwargs = {'encryption_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encryption_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
setattr,
*args
)
def test_invalid_mac_signature_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature key information of a KeyWrappingSpecification
struct.
"""
kwargs = {'mac_signature_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'mac_signature_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
setattr,
*args
)
def test_invalid_attribute_names(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the attribute names of a KeyWrappingSpecification struct.
"""
kwargs = {'attribute_names': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'attribute_names',
['valid', 0]
)
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
setattr,
*args
)
def test_invalid_encoding_option(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encoding option of a KeyWrappingSpecification struct.
"""
kwargs = {'encoding_option': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encoding_option',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
setattr,
*args
)
    def test_read(self):
        """
        Test that a KeyWrappingSpecification struct can be read from a data
        stream.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification()
        # Before reading, every field should be unset.
        self.assertEqual(None, key_wrapping_specification.wrapping_method)
        self.assertEqual(
            None,
            key_wrapping_specification.encryption_key_information
        )
        self.assertEqual(
            None,
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertEqual(None, key_wrapping_specification.attribute_names)
        self.assertEqual(None, key_wrapping_specification.encoding_option)
        # Decode the full fixture built in setUp; the assertions below
        # mirror the values documented next to self.full_encoding.
        key_wrapping_specification.read(self.full_encoding)
        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_specification.mac_signature_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.attribute_names,
            list
        )
        self.assertEqual(
            'Cryptographic Usage Mask',
            key_wrapping_specification.attribute_names[0]
        )
        self.assertEqual(
            enums.EncodingOption.NO_ENCODING,
            key_wrapping_specification.encoding_option
        )
    def test_read_partial(self):
        """
        Test that a KeyWrappingSpecification struct can be read from a
        partial data stream.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification()
        # Before reading, every field should be unset.
        self.assertEqual(None, key_wrapping_specification.wrapping_method)
        self.assertEqual(
            None,
            key_wrapping_specification.encryption_key_information
        )
        self.assertEqual(
            None,
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertEqual(None, key_wrapping_specification.attribute_names)
        self.assertEqual(None, key_wrapping_specification.encoding_option)
        # The partial fixture only carries the wrapping method and the
        # encryption key information.
        key_wrapping_specification.read(self.partial_encoding)
        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        # Fields absent from the encoding should remain unset.
        self.assertIsNone(
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertIsNone(
            key_wrapping_specification.attribute_names
        )
        self.assertIsNone(
            key_wrapping_specification.encoding_option
        )
    def test_read_invalid(self):
        """
        Test that a ValueError gets raised when the required wrapping method
        field is missing from the KeyWrappingSpecification struct encoding.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification()
        # The empty fixture is a bare structure header with no payload, so
        # the mandatory wrapping method field is absent.
        args = (self.empty_encoding,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the wrapping method attribute.",
            key_wrapping_specification.read,
            *args
        )
def test_write(self):
"""
Test that a KeyWrappingSpecification struct can be written to a data
stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_specification.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined KeyWrappingSpecification struct can be
written to a data stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
stream = BytearrayStream()
key_wrapping_specification.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required
KeyWrappingSpecification field is missing when encoding the struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_specification.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
KeyWrappingSpecification structs with the same data.
"""
a = objects.KeyWrappingSpecification()
b = objects.KeyWrappingSpecification()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_wrapping_method(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different wrapping methods.
"""
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encryption_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different encryption key
information.
"""
a = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different MAC/signature key
information.
"""
a = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_attribute_names(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encoding_option(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different encoding options.
"""
a = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
KeyWrappingSpecification structs with the same data.
"""
a = objects.KeyWrappingSpecification()
b = objects.KeyWrappingSpecification()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_wrapping_method(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different wrapping methods.
"""
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encryption_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encryption key
information.
"""
a = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different MAC/signature key
information.
"""
a = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_attribute_names(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encoding_option(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encoding options.
"""
a = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_repr(self):
        """
        Test that repr can be applied to a KeyWrappingSpecification struct.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length'
            ],
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The expected repr spells out every nested CryptographicParameters
        # field, including those left unset (None), since the struct's repr
        # always includes all attributes.
        expected = (
            "KeyWrappingSpecification("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "attribute_names=["
            "'Cryptographic Algorithm', 'Cryptographic Length'], "
            "encoding_option=EncodingOption.TTLV_ENCODING)"
        )
        observed = repr(key_wrapping_specification)
        self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to a KeyWrappingSpecification struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
),
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
],
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
expected = str({
'wrapping_method': enums.WrappingMethod.ENCRYPT,
'encryption_key_information': objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
'mac_signature_key_information':
objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
),
'attribute_names': [
'Cryptographic Algorithm',
'Cryptographic Length'
],
'encoding_option': enums.EncodingOption.TTLV_ENCODING
})
observed = str(key_wrapping_specification)
self.assertEqual(expected, observed)
| 37.125081
| 79
| 0.631324
| 11,043
| 114,271
| 6.339401
| 0.036131
| 0.022627
| 0.019413
| 0.035654
| 0.952047
| 0.936777
| 0.917321
| 0.898394
| 0.88571
| 0.874668
| 0
| 0.063677
| 0.291141
| 114,271
| 3,077
| 80
| 37.137147
| 0.800573
| 0.141436
| 0
| 0.748624
| 0
| 0.020642
| 0.163015
| 0.119852
| 0
| 0
| 0
| 0
| 0.143119
| 1
| 0.062385
| false
| 0
| 0.009174
| 0
| 0.075688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0481b565feae0501b91ed08bf219cd44b07aab36
| 152
|
py
|
Python
|
plugins/netmiko/komand_netmiko/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/netmiko/komand_netmiko/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/netmiko/komand_netmiko/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .configuration_commands.action import ConfigurationCommands
from .show_commands.action import ShowCommands
| 38
| 64
| 0.848684
| 19
| 152
| 6.684211
| 0.789474
| 0.220472
| 0.314961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111842
| 152
| 3
| 65
| 50.666667
| 0.940741
| 0.243421
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
04c39917a382b74f20d5e9f540d0b7a860688f90
| 10,898
|
py
|
Python
|
kbsbot/context_management/tests/test_utils.py
|
astandre/cb-context-management-ms
|
031ea7bd9a7770d5617cb9145d7fc94a0730495a
|
[
"MIT"
] | null | null | null |
kbsbot/context_management/tests/test_utils.py
|
astandre/cb-context-management-ms
|
031ea7bd9a7770d5617cb9145d7fc94a0730495a
|
[
"MIT"
] | null | null | null |
kbsbot/context_management/tests/test_utils.py
|
astandre/cb-context-management-ms
|
031ea7bd9a7770d5617cb9145d7fc94a0730495a
|
[
"MIT"
] | null | null | null |
import unittest
from kbsbot.context_management.utils import *
class TestUtils(unittest.TestCase):
    """Unit tests for the context-management utility functions."""

    # URIs shared by every interaction fixture.
    COURSE_TYPE = "http://127.0.0.1/ockb/course/ontology/Course"
    COURSE_VALUE = "http://127.0.0.1/ockb/resources/EAIG5"
    INTENT = "http://127.0.0.1/ockb/resources/ObtenerInformacion"
    ANSWER_TEXT = (
        " El curso denominado emprendimiento y generación de ideas se oferta con"
        " la finalidad de desarrollar conocimientos, identificar y potenciar"
        " oportunidades, para emprender e innovar en el ámbito personal, social,"
        " laboral o productivo."
    )

    def _interaction(self, date, user_input, with_answer, with_entities):
        """Build one interaction record.

        ``with_answer=False`` yields an empty ``"output"`` dict;
        ``with_entities`` controls whether the answer context carries the
        course entity.  Every call returns freshly built dicts/lists so no
        state is shared between records.
        """
        record = {
            "date": date,
            "input": {
                "context": {"entities": [], "intent": None},
                "user_input": user_input,
            },
            "output": {},
            "social_network": 1,
            "user": 1,
        }
        if with_answer:
            entities = []
            if with_entities:
                entities.append(
                    {"type": self.COURSE_TYPE, "value": self.COURSE_VALUE}
                )
            record["output"] = {
                "answer": {"answer_type": "text", "text": self.ANSWER_TEXT},
                "context": {"entities": entities, "intent": self.INTENT},
            }
        return record

    def _build_interactions(self, with_entities):
        """Recreate the five-record fixture used by the original literals."""
        dates_and_inputs = [
            ("Mon, 03 Feb 2020 14:33:41 GMT",
             "Cuando inicia el curso de huertos familiares"),
            ("Mon, 03 Feb 2020 14:35:47 GMT",
             "Cuando inicia el curso de huertos familiares"),
            ("Mon, 03 Feb 2020 14:37:44 GMT",
             "Cuando inicia el curso de huertos familiares"),
            ("Mon, 03 Feb 2020 14:39:16 GMT",
             "Cuando inicia el curso de huertos familiares"),
            ("Mon, 03 Feb 2020 14:40:42 GMT", "Que cursos hay"),
        ]
        interactions = []
        for index, (date, user_input) in enumerate(dates_and_inputs):
            interactions.append(
                self._interaction(
                    date,
                    user_input,
                    # The third record in the fixture has an empty output.
                    with_answer=(index != 2),
                    with_entities=with_entities,
                )
            )
        return interactions

    def test_get_entities(self):
        requires = ["http://127.0.0.1/ockb/course/ontology/Course"]
        # Interactions whose answers carry the course entity: matches found.
        result = get_entities(self._build_interactions(True), requires)
        self.assertTrue(len(result) > 0)
        # Identical interactions but with empty entity lists: no matches.
        requires = ["http://127.0.0.1/ockb/course/ontology/Course"]
        result = get_entities(self._build_interactions(False), requires)
        self.assertTrue(len(result) == 0)
| 42.737255
| 269
| 0.379244
| 805
| 10,898
| 5.088199
| 0.11677
| 0.065918
| 0.035156
| 0.039551
| 0.974609
| 0.974609
| 0.974609
| 0.974609
| 0.974609
| 0.974609
| 0
| 0.048734
| 0.521747
| 10,898
| 254
| 270
| 42.905512
| 0.737145
| 0
| 0
| 0.645161
| 0
| 0.032258
| 0.401358
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 1
| 0.004032
| false
| 0
| 0.008065
| 0
| 0.016129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6ba1aa4e056bd3d9565770507c3ae49bf367a18
| 29,451
|
py
|
Python
|
src/guidelines/external_contig.py
|
noemiefedon/BELLA
|
ca86e5cd6f593478235c64aa4d0409b0e78dbcbb
|
[
"MIT"
] | null | null | null |
src/guidelines/external_contig.py
|
noemiefedon/BELLA
|
ca86e5cd6f593478235c64aa4d0409b0e78dbcbb
|
[
"MIT"
] | null | null | null |
src/guidelines/external_contig.py
|
noemiefedon/BELLA
|
ca86e5cd6f593478235c64aa4d0409b0e78dbcbb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Function to ensure external sorting for contiguity
'across adjacent sublaminates'
Created on Mon Jan 29 12:00:18 2018
@author: Noemie Fedon
"""
import numpy as np
def external_contig(angle, n_plies_group, constraints, ss_before, angle2=None):
    '''
    returns only the stacking sequences that satisfy constraints concerning
    contiguity at the junction with an adjacent group of plies, but not within
    the group of plies

    A candidate row of ``angle`` is discarded when its first plies, together
    with the last plies of ``ss_before``, would form a run of more than
    ``constraints.n_contig`` identical ply angles across the junction.

    OUTPUTS

    - angle: the selected sublaminate stacking sequences line by line
    - angle2: returned unchanged (see NOTE below)

    INPUTS

    - angle: the first sublaminate stacking sequences
    - n_plies_group: number of plies in the new ply group
    - constraints: design guidelines (uses ``contig`` and ``n_contig``)
    - ss_before: the stacking sequence of the sublaminate adjacent to the
      first sublaminate
    - angle2: matrix storing the second sublaminate stacking sequences

    Raises ``Exception`` when ``n_plies_group`` exceeds the number of columns
    of ``angle``, or when ``constraints.n_contig`` is not in 2..6 (only
    checked when ``ss_before`` is non-empty, as in the original cascade).

    NOTE(review): the original implementation duplicated the whole filter
    verbatim in its ``angle2 is not None`` branch but never removed rows from
    ``angle2`` itself; only ``angle`` was ever filtered.  That behaviour is
    preserved here — confirm whether ``angle2`` was meant to be filtered in
    step with ``angle``.
    '''
    if angle.ndim == 1:
        angle = angle.reshape((1, angle.size))
    ss_before_length = ss_before.size

    # CHECK FOR CORRECT INPUTS SIZE
    if n_plies_group > angle.shape[1]:
        raise Exception('The input set of angles have fewer elements that what is asked to be checked')

    # TO ENSURE CONTIGUITY at the junction of ply groups.
    if constraints.contig and ss_before_length >= 1:
        n_contig = constraints.n_contig
        if n_contig not in (2, 3, 4, 5, 6):
            # The original message said '2, 3, 4 or 5' although 6 was accepted.
            raise Exception('constraints.n_contig must be 2, 3, 4, 5 or 6')

        # A forbidden run of n_contig + 1 identical plies across the junction
        # is made of the last k plies of ss_before plus the first
        # n_contig + 1 - k plies of the candidate row, for every feasible k.
        for n_before_plies in range(1, min(n_contig, ss_before_length) + 1):
            n_new_plies = n_contig - n_before_plies + 1
            # Replicates the original guard: when more than one ply of the
            # new group is involved, the rule was only applied if the group
            # counts more than n_contig - k plies.
            if n_new_plies > 1 and n_plies_group <= n_contig - n_before_plies:
                continue
            ply = ss_before[-1]
            # The last k plies of ss_before must all share the same angle.
            if any(ss_before[-jj] != ply
                   for jj in range(2, n_before_plies + 1)):
                continue
            # Delete every row whose leading plies extend the run.
            rows_to_delete = [
                row for row in range(angle.shape[0])
                if all(angle[row, col] == ply for col in range(n_new_plies))
            ]
            if rows_to_delete:
                angle = np.delete(angle, rows_to_delete, axis=0)

    return angle, angle2
if __name__ == "__main__":
    'Test'
    import sys
    sys.path.append(r'C:\BELLA')
    from src.LAYLA_V02.constraints import Constraints
    from src.divers.pretty_print import print_ss, print_list_ss

    # Demo: filter a small set of candidate stacking sequences against a
    # single adjacent ply with the contiguity rule n_contig = 2.
    constraints = Constraints()
    constraints.contig = True
    constraints.n_contig = 2

    candidates = np.array([
        [-45, -45, 0, 45, 90],
        [0, 45, 45, 45, 45],
        [0, 0, 0, 45, 45],
    ])
    adjacent = np.array([-45])
    n_plies_group = 5
    middle_ply = 0

    print('*** Test for the function external_contig ***\n')
    print('Input stacking sequences:\n')
    print_list_ss(candidates)
    print('Stacking sequence of adajacent sublaminate:\n')
    print_ss(adjacent)

    selected, _ = external_contig(
        candidates, n_plies_group, constraints, adjacent, candidates)
    if selected.shape[0]:
        print('Stacking sequences satisfying the rule:\n')
        print_list_ss(selected)
    else:
        print('No stacking sequence satisfy the rule\n')
| 40.014946
| 104
| 0.368612
| 3,078
| 29,451
| 3.375893
| 0.045484
| 0.190934
| 0.145511
| 0.157059
| 0.873833
| 0.873833
| 0.868059
| 0.868059
| 0.858628
| 0.858628
| 0
| 0.048981
| 0.525143
| 29,451
| 736
| 105
| 40.014946
| 0.694029
| 0.032325
| 0
| 0.940075
| 0
| 0
| 0.028364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001873
| false
| 0.037453
| 0.007491
| 0
| 0.011236
| 0.016854
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b6bb15d9d1c1238ff8ec41f15e5776493ca189dc
| 525,541
|
py
|
Python
|
causal_networkx/ci/tests/testdata.py
|
adam2392/causal-networkx
|
aba5355d2e900b30dd3d99916981674f3c0074e9
|
[
"BSD-3-Clause"
] | null | null | null |
causal_networkx/ci/tests/testdata.py
|
adam2392/causal-networkx
|
aba5355d2e900b30dd3d99916981674f3c0074e9
|
[
"BSD-3-Clause"
] | null | null | null |
causal_networkx/ci/tests/testdata.py
|
adam2392/causal-networkx
|
aba5355d2e900b30dd3d99916981674f3c0074e9
|
[
"BSD-3-Clause"
] | null | null | null |
"""The data included in this file is the same data distributed with
the pcalg package for R developed by Markus Kalisch, Alain Hauser,
Martin Maechler, Diego Colombo, Doris Entner, Patrik Hoyer, Antti
Hyttinen, and Jonas Peters.
License: GPLv2.
"""
bin_data = [
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
1,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
bin_answer = [
9.95394730483e-256,
6.9654995846e-250,
2.70986437702e-244,
1.66028307209e-137,
4.5256578439e-134,
]
dis_data = [
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
0,
1,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
1,
0,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
0,
1,
0,
2,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
3,
0,
2,
0,
1,
2,
0,
2,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
0,
0,
1,
0,
1,
0,
0,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
2,
3,
1,
0,
0,
1,
3,
1,
0,
0,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
0,
1,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
2,
0,
1,
0,
0,
0,
2,
0,
2,
1,
2,
2,
1,
2,
1,
1,
2,
0,
2,
1,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
0,
1,
0,
3,
0,
0,
1,
1,
2,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
0,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
0,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
1,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
1,
1,
1,
1,
2,
0,
1,
0,
0,
1,
0,
0,
2,
1,
2,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
1,
2,
0,
1,
0,
2,
1,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
1,
0,
2,
1,
0,
0,
1,
2,
0,
2,
0,
0,
2,
1,
0,
0,
2,
2,
1,
0,
1,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
0,
2,
1,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
0,
2,
0,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
3,
1,
2,
1,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
1,
0,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
0,
1,
0,
2,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
3,
1,
1,
0,
2,
1,
0,
2,
0,
2,
2,
0,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
1,
0,
1,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
1,
1,
3,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
0,
0,
1,
1,
1,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
3,
0,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
1,
1,
2,
1,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
1,
0,
1,
0,
2,
0,
1,
1,
1,
2,
0,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
0,
0,
2,
1,
1,
0,
0,
1,
0,
1,
2,
0,
2,
1,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
0,
0,
0,
0,
3,
1,
1,
0,
2,
2,
1,
0,
0,
0,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
0,
0,
1,
0,
1,
0,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
1,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
0,
1,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
0,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
1,
2,
0,
1,
2,
1,
2,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
0,
0,
1,
2,
2,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
2,
1,
1,
2,
1,
0,
0,
2,
2,
1,
2,
1,
1,
3,
0,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
0,
1,
1,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
1,
1,
3,
0,
1,
0,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
0,
2,
0,
1,
3,
1,
2,
0,
2,
2,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
1,
1,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
2,
1,
2,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
3,
1,
2,
0,
1,
2,
0,
2,
0,
0,
0,
1,
2,
0,
2,
3,
1,
1,
0,
2,
3,
1,
2,
0,
0,
1,
1,
2,
0,
2,
3,
0,
1,
0,
2,
2,
0,
2,
0,
2,
0,
1,
1,
1,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
1,
1,
1,
2,
1,
0,
2,
0,
1,
1,
2,
2,
1,
2,
0,
1,
1,
1,
0,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
0,
2,
0,
2,
3,
1,
2,
0,
1,
1,
1,
1,
1,
0,
2,
0,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
0,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
3,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
3,
1,
2,
1,
2,
2,
1,
2,
1,
2,
1,
1,
1,
0,
0,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
0,
0,
0,
0,
0,
1,
1,
1,
0,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
0,
2,
2,
1,
2,
0,
0,
0,
1,
1,
1,
1,
1,
1,
2,
0,
2,
3,
1,
0,
0,
1,
1,
0,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
0,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
1,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
1,
1,
1,
1,
1,
2,
1,
1,
0,
0,
3,
1,
0,
0,
0,
1,
0,
2,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
0,
0,
0,
1,
0,
0,
3,
1,
1,
1,
0,
2,
1,
2,
1,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
1,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
1,
1,
1,
0,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
1,
1,
2,
1,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
1,
1,
0,
0,
0,
0,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
0,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
0,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
1,
2,
3,
0,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
1,
0,
1,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
1,
1,
1,
0,
2,
3,
1,
2,
0,
2,
0,
0,
2,
1,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
0,
2,
0,
0,
0,
0,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
0,
1,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
3,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
0,
0,
0,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
1,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
2,
0,
0,
0,
0,
3,
1,
0,
1,
0,
2,
1,
2,
0,
2,
0,
0,
0,
1,
0,
2,
1,
1,
1,
1,
2,
0,
2,
0,
2,
2,
1,
2,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
0,
2,
1,
0,
1,
1,
0,
0,
1,
2,
1,
0,
0,
2,
3,
0,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
1,
0,
0,
0,
0,
2,
1,
0,
0,
2,
1,
1,
2,
0,
2,
1,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
0,
1,
1,
1,
1,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
1,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
2,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
0,
0,
1,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
1,
1,
0,
0,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
0,
0,
2,
0,
0,
1,
1,
2,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
0,
1,
0,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
0,
1,
1,
1,
1,
2,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
3,
0,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
3,
1,
1,
0,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
2,
0,
0,
0,
0,
2,
1,
1,
0,
2,
0,
0,
2,
1,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
0,
2,
0,
2,
0,
1,
2,
0,
0,
3,
1,
1,
0,
1,
0,
1,
1,
0,
2,
1,
1,
2,
0,
2,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
1,
0,
1,
0,
2,
0,
1,
3,
1,
2,
1,
1,
3,
1,
1,
1,
1,
2,
1,
0,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
0,
0,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
0,
0,
0,
1,
0,
1,
1,
1,
3,
0,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
1,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
1,
0,
2,
1,
2,
0,
0,
0,
0,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
1,
0,
2,
1,
1,
0,
0,
0,
1,
0,
0,
0,
3,
1,
0,
0,
1,
0,
0,
2,
0,
1,
0,
0,
2,
0,
0,
1,
0,
2,
0,
2,
0,
1,
2,
0,
0,
0,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
3,
1,
0,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
3,
0,
1,
0,
2,
2,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
3,
1,
2,
0,
1,
0,
1,
1,
1,
1,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
0,
1,
0,
3,
0,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
2,
3,
1,
2,
0,
1,
1,
1,
2,
1,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
1,
0,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
0,
2,
0,
1,
0,
0,
2,
1,
2,
2,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
1,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
1,
1,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
0,
2,
1,
1,
2,
1,
1,
1,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
1,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
1,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
0,
1,
0,
2,
2,
0,
1,
0,
0,
3,
1,
0,
1,
0,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
0,
2,
1,
2,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
1,
1,
1,
0,
2,
0,
1,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
1,
2,
2,
0,
0,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
0,
1,
3,
1,
2,
0,
2,
0,
0,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
0,
0,
0,
0,
0,
1,
1,
2,
0,
2,
0,
0,
1,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
1,
1,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
3,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
3,
0,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
1,
1,
0,
1,
1,
1,
2,
1,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
0,
2,
0,
0,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
0,
2,
0,
0,
1,
1,
1,
0,
1,
3,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
2,
0,
0,
2,
0,
0,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
1,
1,
2,
0,
2,
1,
1,
0,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
0,
2,
1,
0,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
1,
0,
2,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
2,
1,
0,
0,
0,
0,
3,
1,
1,
1,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
2,
0,
1,
1,
0,
2,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
0,
0,
3,
1,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
0,
1,
1,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
1,
1,
2,
3,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
0,
1,
1,
0,
1,
1,
2,
0,
0,
3,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
1,
1,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
0,
0,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
1,
0,
2,
1,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
2,
0,
0,
1,
1,
2,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
2,
0,
0,
3,
1,
2,
0,
2,
3,
0,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
1,
0,
2,
1,
0,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
0,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
0,
1,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
2,
2,
0,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
2,
0,
0,
0,
1,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
0,
1,
0,
0,
2,
2,
0,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
0,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
1,
0,
1,
0,
0,
2,
0,
2,
1,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
2,
0,
0,
0,
1,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
1,
1,
0,
0,
0,
1,
0,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
0,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
0,
2,
0,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
1,
1,
0,
1,
2,
1,
1,
3,
0,
2,
0,
2,
2,
1,
1,
1,
1,
0,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
1,
2,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
1,
0,
3,
0,
2,
1,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
1,
1,
1,
1,
2,
0,
1,
2,
0,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
2,
0,
0,
3,
1,
2,
0,
1,
3,
0,
1,
0,
1,
0,
0,
1,
1,
1,
2,
1,
2,
1,
2,
1,
1,
1,
0,
0,
0,
0,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
1,
0,
1,
0,
1,
0,
1,
2,
0,
0,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
1,
0,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
0,
1,
1,
1,
3,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
0,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
1,
0,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
3,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
1,
1,
1,
1,
2,
3,
0,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
3,
1,
0,
0,
0,
0,
0,
2,
0,
2,
1,
0,
2,
1,
2,
2,
1,
1,
1,
2,
1,
1,
2,
1,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
2,
2,
0,
2,
1,
1,
3,
1,
1,
0,
0,
0,
0,
2,
0,
2,
0,
1,
0,
1,
1,
3,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
0,
0,
0,
0,
0,
2,
2,
1,
0,
0,
0,
1,
1,
2,
1,
1,
1,
1,
1,
1,
1,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
1,
0,
2,
1,
2,
0,
1,
1,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
3,
1,
0,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
1,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
2,
1,
2,
1,
1,
3,
1,
0,
0,
0,
2,
1,
2,
1,
1,
3,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
3,
0,
2,
1,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
0,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
1,
2,
0,
2,
0,
0,
2,
0,
2,
0,
0,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
0,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
1,
1,
1,
0,
2,
2,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
1,
1,
2,
2,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
2,
3,
1,
1,
1,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
3,
1,
2,
1,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
1,
1,
3,
1,
0,
0,
0,
3,
1,
0,
0,
1,
3,
1,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
1,
2,
2,
1,
1,
0,
1,
1,
1,
2,
1,
1,
3,
0,
1,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
2,
0,
0,
2,
0,
0,
0,
0,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
2,
0,
1,
0,
0,
0,
1,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
1,
0,
0,
0,
1,
2,
1,
1,
0,
2,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
3,
0,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
0,
0,
1,
1,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
0,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
1,
1,
1,
0,
1,
0,
1,
3,
0,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
1,
1,
1,
1,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
0,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
0,
0,
2,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
2,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
1,
1,
3,
1,
0,
0,
1,
1,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
1,
1,
1,
3,
0,
1,
1,
1,
2,
1,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
1,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
3,
0,
1,
0,
0,
0,
1,
1,
0,
0,
3,
0,
2,
0,
1,
2,
0,
2,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
0,
2,
1,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
1,
2,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
1,
1,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
1,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
0,
1,
0,
2,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
0,
0,
2,
1,
1,
1,
1,
0,
0,
0,
3,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
0,
0,
2,
1,
1,
2,
1,
2,
1,
0,
3,
1,
0,
1,
1,
3,
0,
1,
0,
0,
0,
1,
2,
1,
2,
3,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
0,
0,
2,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
1,
2,
3,
0,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
0,
2,
1,
1,
3,
0,
2,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
0,
0,
1,
1,
0,
2,
1,
2,
1,
1,
2,
0,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
1,
1,
1,
1,
2,
0,
1,
2,
0,
1,
3,
0,
2,
1,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
1,
0,
0,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
0,
2,
0,
0,
0,
0,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
1,
2,
0,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
1,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
0,
2,
0,
1,
2,
0,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
0,
1,
1,
1,
0,
1,
3,
1,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
1,
0,
2,
0,
1,
0,
0,
2,
0,
2,
2,
0,
2,
0,
0,
3,
1,
2,
1,
1,
0,
1,
0,
0,
1,
3,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
2,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
3,
0,
2,
1,
2,
2,
0,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
2,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
0,
2,
1,
2,
2,
1,
1,
0,
2,
0,
1,
1,
0,
2,
3,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
1,
1,
2,
0,
0,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
0,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
2,
2,
1,
0,
0,
0,
2,
0,
2,
0,
2,
2,
1,
1,
1,
1,
3,
1,
1,
1,
0,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
1,
0,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
1,
1,
2,
0,
2,
2,
0,
2,
1,
2,
0,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
1,
1,
1,
2,
1,
1,
3,
0,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
1,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
1,
3,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
2,
3,
0,
1,
0,
1,
0,
0,
2,
0,
2,
2,
0,
0,
1,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
1,
1,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
2,
2,
0,
0,
1,
1,
3,
1,
0,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
1,
1,
0,
1,
3,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
1,
0,
0,
3,
1,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
1,
0,
2,
1,
0,
2,
1,
2,
0,
0,
1,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
1,
0,
0,
1,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
2,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
1,
1,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
1,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
0,
3,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
1,
1,
0,
1,
0,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
1,
1,
2,
0,
1,
1,
0,
2,
3,
0,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
0,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
2,
0,
1,
1,
0,
0,
3,
1,
1,
0,
1,
0,
0,
1,
1,
2,
1,
1,
0,
0,
2,
2,
1,
2,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
0,
0,
1,
0,
0,
2,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
0,
1,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
2,
1,
0,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
1,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
0,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
3,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
1,
1,
1,
0,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
2,
1,
1,
0,
2,
1,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
1,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
1,
1,
1,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
1,
0,
0,
1,
1,
0,
1,
3,
1,
0,
1,
0,
1,
0,
2,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
1,
1,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
0,
1,
0,
0,
0,
1,
1,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
1,
1,
1,
0,
2,
2,
0,
0,
0,
0,
3,
0,
1,
1,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
1,
2,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
1,
3,
1,
2,
0,
0,
3,
1,
1,
0,
2,
1,
0,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
0,
2,
1,
1,
2,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
0,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
1,
1,
2,
1,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
2,
0,
1,
2,
1,
1,
3,
1,
2,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
1,
1,
1,
2,
1,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
0,
0,
0,
0,
3,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
1,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
2,
3,
1,
0,
0,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
1,
0,
0,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
3,
0,
2,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
2,
0,
1,
0,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
1,
1,
2,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
0,
2,
1,
0,
2,
1,
1,
0,
0,
2,
0,
0,
2,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
2,
0,
2,
0,
1,
0,
0,
0,
1,
0,
1,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
2,
1,
1,
0,
0,
0,
0,
1,
2,
0,
0,
3,
1,
0,
0,
2,
0,
1,
2,
1,
2,
3,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
1,
0,
0,
1,
1,
1,
2,
3,
1,
1,
0,
0,
0,
0,
2,
0,
2,
3,
1,
0,
0,
0,
3,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
0,
1,
1,
0,
1,
3,
1,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
1,
1,
1,
1,
1,
1,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
0,
0,
2,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
0,
0,
0,
0,
0,
1,
0,
0,
3,
1,
2,
0,
1,
3,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
0,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
0,
3,
1,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
0,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
0,
2,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
1,
1,
0,
0,
0,
2,
0,
0,
3,
1,
0,
1,
2,
3,
1,
0,
0,
1,
0,
0,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
0,
2,
0,
1,
2,
1,
0,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
1,
1,
2,
1,
2,
0,
2,
1,
1,
2,
1,
2,
2,
1,
2,
1,
2,
2,
1,
1,
0,
0,
3,
1,
1,
1,
2,
3,
0,
0,
1,
2,
2,
0,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
1,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
1,
1,
1,
0,
1,
3,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
3,
1,
0,
0,
0,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
0,
0,
2,
1,
0,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
0,
0,
2,
1,
2,
2,
1,
2,
1,
2,
3,
1,
1,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
3,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
0,
1,
0,
1,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
1,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
0,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
0,
0,
2,
1,
0,
0,
2,
0,
0,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
1,
0,
3,
0,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
3,
0,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
1,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
0,
0,
1,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
1,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
1,
2,
1,
1,
1,
0,
1,
3,
0,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
1,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
0,
1,
1,
0,
2,
3,
1,
1,
1,
0,
1,
1,
0,
1,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
0,
2,
2,
0,
0,
0,
0,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
2,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
2,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
1,
0,
0,
2,
1,
1,
0,
1,
3,
0,
2,
0,
2,
0,
0,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
1,
0,
3,
1,
2,
1,
1,
1,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
1,
0,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
3,
1,
1,
1,
1,
1,
0,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
0,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
0,
0,
0,
1,
0,
1,
2,
0,
1,
2,
0,
1,
0,
2,
0,
1,
2,
1,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
0,
0,
0,
0,
3,
0,
0,
0,
0,
1,
0,
2,
0,
1,
2,
1,
2,
1,
1,
1,
0,
2,
0,
1,
1,
1,
2,
0,
0,
0,
1,
0,
0,
2,
0,
1,
0,
0,
1,
3,
1,
2,
1,
0,
1,
1,
2,
1,
1,
2,
0,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
2,
3,
0,
2,
0,
0,
3,
1,
2,
0,
1,
2,
0,
1,
0,
2,
2,
1,
1,
0,
2,
0,
1,
2,
1,
1,
2,
0,
2,
1,
1,
0,
1,
1,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
0,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
0,
1,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
1,
0,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
1,
2,
2,
1,
2,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
1,
0,
1,
0,
2,
0,
1,
0,
0,
0,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
0,
2,
1,
0,
1,
0,
2,
1,
0,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
1,
0,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
0,
1,
1,
1,
1,
3,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
2,
1,
0,
1,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
0,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
2,
0,
0,
3,
0,
1,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
1,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
0,
1,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
0,
0,
1,
0,
0,
2,
1,
2,
0,
2,
1,
1,
2,
0,
2,
2,
1,
2,
1,
2,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
1,
0,
3,
0,
1,
0,
2,
0,
0,
1,
1,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
1,
1,
3,
0,
2,
1,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
1,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
3,
1,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
1,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
0,
1,
0,
2,
2,
1,
1,
0,
2,
0,
1,
2,
0,
2,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
0,
0,
0,
1,
0,
0,
2,
1,
1,
0,
2,
1,
1,
2,
0,
0,
0,
0,
1,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
1,
1,
0,
0,
2,
0,
2,
0,
0,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
2,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
3,
0,
2,
0,
1,
0,
1,
2,
1,
0,
1,
0,
1,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
1,
2,
2,
0,
2,
0,
1,
2,
1,
1,
0,
2,
1,
1,
1,
1,
1,
3,
0,
1,
1,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
1,
0,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
1,
1,
2,
1,
0,
0,
2,
3,
1,
0,
1,
2,
1,
0,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
0,
1,
0,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
1,
1,
3,
0,
0,
1,
0,
3,
1,
2,
0,
0,
0,
0,
1,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
1,
0,
1,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
0,
1,
1,
2,
1,
2,
1,
0,
1,
0,
2,
0,
1,
0,
1,
1,
0,
1,
1,
1,
1,
0,
1,
3,
1,
0,
1,
2,
1,
1,
2,
0,
1,
3,
0,
2,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
2,
0,
2,
1,
1,
2,
0,
0,
2,
0,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
0,
2,
3,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
1,
2,
2,
1,
0,
1,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
1,
2,
3,
1,
1,
1,
2,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
0,
2,
0,
0,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
0,
1,
0,
1,
0,
0,
2,
0,
1,
3,
1,
1,
1,
2,
3,
1,
2,
1,
1,
3,
0,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
2,
0,
1,
0,
1,
1,
3,
0,
0,
0,
0,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
1,
0,
2,
3,
0,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
1,
0,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
3,
1,
1,
0,
0,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
0,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
0,
2,
1,
1,
1,
1,
0,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
2,
0,
0,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
1,
0,
0,
3,
1,
1,
0,
2,
2,
1,
0,
1,
2,
2,
1,
1,
1,
2,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
0,
1,
1,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
2,
1,
1,
1,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
3,
0,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
1,
2,
2,
1,
1,
0,
2,
1,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
0,
1,
1,
1,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
1,
2,
1,
0,
1,
1,
2,
3,
1,
2,
0,
1,
2,
0,
0,
1,
1,
1,
0,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
3,
0,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
0,
1,
1,
1,
2,
1,
0,
0,
0,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
1,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
1,
1,
2,
0,
1,
1,
0,
0,
1,
1,
2,
1,
1,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
1,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
1,
1,
2,
1,
0,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
0,
1,
0,
2,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
0,
0,
0,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
1,
1,
2,
0,
1,
1,
1,
1,
0,
2,
3,
0,
2,
0,
1,
0,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
1,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
0,
1,
0,
0,
0,
0,
0,
1,
0,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
3,
0,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
2,
1,
0,
3,
1,
2,
0,
1,
0,
0,
0,
0,
2,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
3,
0,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
2,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
1,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
0,
1,
0,
3,
1,
1,
0,
2,
0,
1,
2,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
0,
1,
1,
3,
0,
0,
0,
0,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
1,
2,
1,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
0,
0,
1,
0,
0,
0,
0,
0,
1,
2,
0,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
0,
1,
1,
0,
2,
0,
2,
0,
2,
2,
1,
1,
0,
2,
0,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
3,
1,
0,
1,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
1,
1,
2,
0,
1,
3,
0,
1,
0,
1,
3,
1,
1,
1,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
1,
0,
1,
1,
1,
2,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
2,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
2,
1,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
1,
1,
3,
1,
1,
0,
1,
2,
1,
1,
1,
1,
1,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
0,
1,
2,
1,
0,
2,
1,
0,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
0,
0,
0,
1,
2,
0,
1,
0,
0,
2,
0,
2,
1,
1,
1,
0,
2,
3,
1,
1,
1,
2,
2,
1,
2,
0,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
0,
2,
1,
2,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
2,
1,
0,
2,
0,
1,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
1,
0,
1,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
1,
0,
2,
1,
2,
1,
1,
3,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
1,
2,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
1,
1,
0,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
3,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
0,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
3,
0,
2,
0,
1,
3,
1,
1,
0,
0,
1,
1,
0,
1,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
0,
0,
1,
1,
1,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
0,
0,
1,
0,
1,
1,
0,
2,
0,
0,
0,
0,
1,
0,
0,
0,
0,
2,
0,
2,
3,
1,
1,
1,
1,
0,
0,
2,
1,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
2,
0,
0,
2,
0,
2,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
1,
0,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
1,
1,
2,
0,
0,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
1,
2,
1,
0,
0,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
3,
1,
0,
0,
1,
0,
0,
2,
0,
2,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
0,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
1,
1,
3,
0,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
1,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
1,
2,
2,
0,
1,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
1,
1,
1,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
1,
1,
1,
1,
1,
3,
0,
1,
1,
1,
2,
1,
1,
0,
1,
3,
0,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
1,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
0,
1,
1,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
1,
0,
2,
0,
0,
2,
1,
0,
0,
2,
2,
0,
1,
0,
1,
3,
1,
0,
1,
1,
0,
1,
2,
0,
0,
0,
1,
2,
1,
0,
1,
0,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
0,
0,
0,
0,
1,
1,
0,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
2,
3,
1,
1,
0,
2,
0,
1,
2,
1,
2,
2,
1,
1,
1,
2,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
1,
0,
2,
1,
1,
1,
1,
3,
1,
1,
0,
2,
2,
1,
0,
0,
1,
2,
0,
2,
0,
0,
3,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
0,
0,
1,
1,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
1,
1,
1,
1,
0,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
0,
1,
2,
0,
0,
2,
0,
0,
0,
0,
3,
0,
0,
0,
1,
1,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
2,
0,
1,
1,
1,
1,
0,
1,
3,
1,
0,
1,
0,
0,
0,
2,
0,
2,
2,
1,
1,
1,
1,
3,
1,
0,
1,
0,
3,
0,
1,
1,
0,
2,
1,
1,
1,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
1,
1,
1,
1,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
0,
0,
1,
0,
2,
0,
0,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
1,
1,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
1,
0,
2,
0,
1,
0,
1,
2,
0,
2,
1,
0,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
1,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
2,
2,
0,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
1,
0,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
0,
2,
0,
2,
0,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
1,
0,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
0,
0,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
1,
2,
3,
1,
1,
0,
2,
3,
0,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
0,
1,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
1,
1,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
2,
0,
2,
3,
0,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
0,
0,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
0,
0,
2,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
0,
2,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
0,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
1,
0,
1,
0,
2,
2,
1,
1,
1,
2,
1,
1,
2,
0,
2,
0,
0,
2,
1,
1,
1,
0,
1,
1,
2,
2,
1,
0,
0,
0,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
1,
1,
1,
1,
1,
3,
1,
2,
0,
0,
2,
1,
0,
1,
0,
2,
1,
2,
1,
1,
3,
0,
1,
0,
2,
0,
0,
1,
1,
2,
3,
1,
1,
1,
1,
1,
1,
1,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
3,
0,
1,
0,
0,
2,
1,
1,
0,
0,
1,
0,
1,
0,
1,
1,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
0,
1,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
1,
0,
1,
1,
2,
1,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
1,
0,
0,
2,
1,
0,
1,
0,
3,
0,
0,
0,
0,
3,
1,
2,
1,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
2,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
0,
3,
1,
2,
0,
2,
0,
1,
1,
1,
1,
3,
1,
2,
0,
1,
3,
1,
1,
1,
1,
1,
1,
0,
0,
0,
2,
1,
0,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
0,
1,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
3,
1,
2,
0,
1,
1,
0,
0,
0,
1,
2,
1,
0,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
2,
1,
1,
0,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
0,
1,
0,
0,
2,
1,
2,
1,
1,
2,
0,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
1,
3,
1,
1,
0,
2,
0,
0,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
3,
1,
0,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
1,
0,
3,
1,
0,
0,
1,
2,
0,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
1,
0,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
1,
0,
1,
0,
2,
1,
0,
0,
1,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
2,
1,
0,
1,
0,
0,
1,
2,
0,
0,
1,
1,
1,
1,
2,
3,
1,
0,
0,
0,
0,
1,
1,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
0,
2,
1,
0,
1,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
1,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
1,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
0,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
0,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
0,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
0,
2,
0,
2,
1,
1,
2,
0,
1,
1,
1,
1,
1,
1,
2,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
1,
1,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
1,
2,
2,
0,
1,
0,
0,
1,
1,
1,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
1,
0,
1,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
0,
0,
1,
1,
1,
3,
0,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
1,
0,
2,
1,
0,
0,
2,
2,
0,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
1,
0,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
3,
0,
2,
0,
1,
2,
1,
1,
1,
1,
1,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
1,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
2,
0,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
0,
1,
1,
1,
3,
0,
2,
0,
2,
3,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
1,
0,
1,
2,
1,
0,
1,
1,
1,
1,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
1,
3,
1,
2,
1,
2,
2,
1,
0,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
0,
1,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
0,
1,
0,
2,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
3,
1,
1,
1,
1,
3,
0,
1,
1,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
3,
1,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
2,
0,
0,
0,
0,
0,
3,
0,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
1,
0,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
1,
0,
1,
0,
2,
0,
1,
2,
1,
2,
1,
1,
1,
1,
0,
0,
2,
0,
1,
0,
1,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
1,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
3,
1,
1,
1,
2,
1,
0,
1,
0,
2,
2,
1,
2,
0,
0,
0,
1,
2,
1,
1,
2,
1,
0,
1,
0,
3,
0,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
1,
1,
3,
1,
0,
0,
0,
3,
1,
0,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
1,
1,
3,
0,
2,
1,
1,
3,
0,
2,
1,
1,
2,
1,
2,
0,
0,
0,
0,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
1,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
2,
0,
1,
2,
1,
0,
2,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
3,
0,
2,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
3,
0,
0,
1,
0,
2,
1,
2,
0,
2,
0,
1,
0,
0,
1,
2,
0,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
1,
2,
2,
1,
2,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
2,
0,
1,
1,
1,
2,
0,
0,
3,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
0,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
0,
1,
0,
2,
2,
0,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
3,
0,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
1,
0,
2,
3,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
0,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
1,
0,
0,
3,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
0,
1,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
2,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
1,
0,
0,
1,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
2,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
0,
0,
0,
0,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
2,
3,
1,
1,
0,
2,
3,
1,
2,
0,
0,
0,
0,
2,
1,
2,
2,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
3,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
1,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
3,
0,
0,
0,
0,
3,
1,
2,
0,
0,
3,
1,
0,
0,
2,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
1,
0,
1,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
0,
0,
1,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
0,
0,
1,
2,
0,
1,
0,
0,
2,
1,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
3,
1,
0,
0,
0,
1,
1,
2,
0,
1,
3,
1,
1,
0,
2,
3,
0,
2,
0,
0,
2,
1,
2,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
0,
0,
2,
1,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
0,
0,
0,
0,
3,
0,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
3,
0,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
1,
0,
1,
2,
0,
1,
2,
0,
2,
0,
1,
0,
0,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
3,
0,
2,
1,
1,
0,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
2,
2,
1,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
0,
1,
2,
2,
1,
2,
1,
1,
1,
0,
0,
0,
1,
0,
0,
2,
1,
0,
3,
0,
2,
1,
0,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
1,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
1,
1,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
1,
2,
1,
1,
0,
0,
1,
0,
1,
0,
0,
2,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
0,
0,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
0,
0,
0,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
1,
1,
2,
0,
2,
0,
1,
1,
0,
2,
3,
1,
2,
0,
0,
0,
0,
2,
1,
0,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
1,
3,
1,
1,
0,
0,
2,
0,
1,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
0,
1,
1,
2,
1,
0,
1,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
0,
2,
1,
1,
2,
1,
1,
1,
2,
1,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
0,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
1,
1,
3,
1,
1,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
3,
1,
0,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
1,
1,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
1,
2,
0,
2,
0,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
2,
1,
0,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
0,
2,
1,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
0,
0,
1,
1,
1,
3,
0,
0,
1,
1,
1,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
1,
2,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
0,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
1,
0,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
3,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
2,
0,
0,
0,
1,
0,
2,
1,
0,
1,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
2,
1,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
0,
0,
0,
1,
0,
2,
0,
1,
0,
1,
2,
2,
1,
2,
1,
0,
2,
1,
0,
0,
2,
2,
1,
1,
0,
2,
1,
1,
2,
1,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
0,
0,
1,
3,
1,
0,
0,
0,
3,
0,
2,
0,
1,
0,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
2,
1,
1,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
1,
0,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
1,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
0,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
0,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
1,
1,
0,
0,
1,
3,
1,
2,
0,
2,
3,
1,
0,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
2,
1,
0,
2,
0,
2,
0,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
0,
0,
2,
0,
2,
0,
0,
3,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
0,
0,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
3,
1,
1,
0,
2,
0,
0,
0,
0,
0,
2,
1,
0,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
2,
0,
1,
0,
0,
1,
1,
0,
2,
0,
2,
2,
1,
1,
1,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
3,
0,
1,
1,
2,
2,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
2,
1,
1,
0,
0,
0,
2,
1,
0,
1,
1,
3,
0,
2,
0,
1,
2,
1,
0,
1,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
1,
1,
3,
0,
1,
0,
1,
0,
1,
1,
0,
2,
3,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
2,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
3,
0,
1,
0,
1,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
0,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
3,
0,
1,
0,
2,
2,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
0,
2,
1,
1,
0,
0,
2,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
0,
1,
1,
0,
2,
0,
0,
1,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
1,
1,
3,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
1,
0,
0,
0,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
0,
2,
1,
0,
1,
0,
2,
1,
2,
3,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
2,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
0,
0,
1,
1,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
3,
1,
2,
0,
2,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
0,
1,
0,
2,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
2,
1,
2,
1,
1,
1,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
1,
2,
3,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
0,
2,
0,
0,
3,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
1,
0,
1,
1,
1,
1,
0,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
0,
0,
1,
0,
1,
0,
2,
1,
1,
1,
1,
3,
0,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
2,
0,
1,
1,
0,
0,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
0,
2,
1,
1,
0,
0,
0,
0,
2,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
0,
0,
2,
1,
0,
0,
1,
1,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
2,
3,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
1,
2,
0,
1,
1,
0,
2,
3,
0,
1,
0,
1,
0,
1,
0,
1,
0,
2,
1,
1,
1,
2,
1,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
0,
1,
0,
1,
0,
0,
1,
0,
1,
3,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
2,
1,
2,
0,
2,
3,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
1,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
1,
0,
2,
3,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
2,
3,
1,
0,
0,
0,
2,
0,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
1,
1,
0,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
1,
1,
1,
0,
1,
3,
0,
0,
0,
1,
2,
0,
0,
1,
0,
2,
1,
2,
0,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
2,
3,
0,
2,
1,
1,
1,
1,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
1,
1,
1,
0,
2,
1,
0,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
1,
0,
0,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
3,
0,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
1,
1,
0,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
1,
2,
2,
1,
2,
0,
0,
0,
0,
0,
1,
1,
2,
0,
0,
1,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
0,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
2,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
0,
1,
2,
1,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
1,
0,
0,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
1,
1,
1,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
0,
1,
1,
2,
0,
0,
0,
0,
1,
1,
2,
1,
0,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
0,
1,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
0,
0,
0,
1,
0,
1,
2,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
0,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
2,
2,
1,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
2,
0,
0,
2,
0,
2,
0,
0,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
1,
2,
1,
1,
0,
1,
3,
1,
0,
0,
2,
0,
0,
2,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
1,
2,
1,
0,
1,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
2,
1,
1,
0,
1,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
0,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
3,
0,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
2,
3,
1,
1,
1,
1,
2,
1,
1,
0,
0,
1,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
0,
0,
2,
0,
2,
3,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
3,
1,
2,
1,
1,
3,
1,
2,
0,
2,
0,
1,
1,
0,
1,
1,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
2,
1,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
0,
0,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
1,
0,
0,
1,
1,
2,
0,
0,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
0,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
1,
0,
1,
0,
2,
3,
1,
1,
1,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
0,
1,
1,
1,
1,
1,
0,
1,
2,
0,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
0,
1,
2,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
1,
2,
0,
0,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
0,
2,
0,
2,
2,
1,
2,
1,
1,
0,
0,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
1,
0,
2,
1,
1,
0,
1,
0,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
0,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
3,
1,
2,
1,
1,
3,
0,
1,
0,
1,
1,
0,
2,
0,
2,
1,
1,
0,
1,
0,
3,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
0,
0,
1,
1,
2,
0,
2,
3,
1,
1,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
0,
1,
2,
0,
2,
1,
1,
0,
0,
0,
2,
0,
0,
0,
0,
1,
1,
2,
0,
1,
3,
1,
2,
1,
0,
2,
0,
2,
0,
2,
2,
1,
2,
1,
1,
0,
0,
0,
1,
0,
1,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
0,
0,
2,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
0,
2,
0,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
1,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
1,
0,
3,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
2,
1,
1,
2,
1,
1,
1,
1,
1,
0,
2,
0,
0,
0,
0,
1,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
1,
1,
2,
0,
2,
1,
1,
1,
0,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
0,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
0,
1,
0,
1,
1,
1,
3,
1,
1,
0,
0,
2,
1,
1,
1,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
1,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
1,
1,
1,
1,
0,
0,
2,
0,
1,
1,
0,
2,
1,
0,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
0,
1,
0,
1,
1,
0,
2,
0,
1,
2,
0,
0,
2,
1,
0,
0,
1,
2,
0,
1,
0,
2,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
2,
0,
1,
1,
0,
0,
2,
1,
0,
1,
2,
2,
1,
1,
1,
1,
2,
1,
0,
0,
1,
3,
1,
2,
0,
2,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
0,
0,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
0,
0,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
0,
1,
0,
0,
2,
1,
0,
0,
0,
1,
1,
0,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
0,
1,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
1,
0,
0,
2,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
1,
0,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
3,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
3,
0,
1,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
1,
0,
2,
0,
0,
0,
1,
1,
1,
1,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
3,
0,
2,
0,
1,
0,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
2,
1,
1,
1,
2,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
3,
0,
1,
1,
1,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
2,
1,
1,
1,
0,
1,
0,
0,
3,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
0,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
0,
1,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
0,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
1,
1,
1,
3,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
1,
1,
1,
0,
2,
0,
0,
1,
0,
0,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
0,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
0,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
1,
2,
1,
1,
2,
1,
1,
3,
0,
1,
0,
1,
1,
0,
0,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
0,
3,
1,
1,
1,
2,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
0,
2,
1,
2,
2,
1,
2,
1,
0,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
1,
0,
2,
1,
0,
0,
0,
0,
3,
0,
1,
1,
1,
1,
1,
2,
0,
1,
0,
1,
2,
0,
0,
1,
1,
0,
0,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
0,
0,
2,
3,
0,
0,
0,
2,
0,
0,
2,
0,
1,
2,
0,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
0,
0,
0,
1,
2,
0,
2,
2,
0,
0,
0,
0,
2,
1,
1,
0,
2,
3,
0,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
1,
0,
1,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
2,
1,
1,
1,
1,
0,
0,
2,
3,
1,
2,
0,
2,
0,
1,
1,
0,
0,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
2,
1,
0,
0,
1,
2,
1,
0,
1,
2,
1,
1,
2,
0,
2,
3,
0,
1,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
1,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
0,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
0,
0,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
0,
0,
0,
1,
0,
3,
1,
1,
1,
2,
3,
0,
0,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
1,
0,
2,
1,
0,
1,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
1,
1,
1,
1,
2,
1,
2,
3,
0,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
0,
2,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
0,
1,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
2,
1,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
0,
1,
0,
1,
3,
1,
1,
0,
0,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
2,
2,
1,
1,
1,
1,
1,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
0,
1,
0,
2,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
2,
0,
2,
0,
0,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
0,
2,
1,
2,
2,
0,
1,
0,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
0,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
2,
0,
1,
0,
1,
1,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
2,
1,
1,
3,
0,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
0,
0,
0,
2,
1,
1,
2,
0,
1,
3,
0,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
0,
2,
1,
1,
2,
1,
2,
1,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
2,
1,
0,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
2,
0,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
1,
0,
0,
1,
1,
0,
1,
1,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
2,
1,
2,
2,
1,
2,
0,
2,
3,
0,
2,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
0,
2,
1,
1,
2,
1,
0,
1,
1,
2,
1,
0,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
0,
1,
3,
0,
0,
0,
2,
3,
0,
2,
0,
1,
1,
0,
2,
1,
1,
2,
0,
2,
0,
2,
3,
1,
1,
0,
1,
1,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
0,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
1,
1,
1,
0,
1,
3,
1,
2,
0,
0,
0,
1,
0,
0,
2,
1,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
0,
0,
1,
1,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
1,
1,
0,
1,
0,
1,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
1,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
3,
0,
0,
0,
1,
1,
1,
2,
1,
1,
3,
0,
0,
1,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
0,
2,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
3,
1,
2,
1,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
1,
0,
1,
3,
1,
0,
0,
0,
0,
0,
0,
0,
1,
2,
1,
1,
0,
1,
1,
1,
1,
1,
0,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
0,
1,
1,
1,
0,
1,
2,
0,
0,
2,
1,
0,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
3,
1,
0,
1,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
2,
0,
2,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
0,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
1,
1,
1,
0,
1,
0,
1,
0,
0,
2,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
0,
0,
1,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
1,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
0,
2,
0,
0,
1,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
0,
1,
1,
2,
1,
1,
2,
0,
1,
0,
2,
2,
1,
0,
1,
0,
2,
1,
2,
0,
0,
3,
1,
0,
1,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
0,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
2,
1,
1,
1,
0,
1,
1,
1,
0,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
1,
1,
1,
0,
0,
1,
1,
2,
0,
0,
2,
0,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
1,
0,
1,
1,
2,
2,
1,
2,
0,
0,
0,
1,
2,
0,
0,
1,
1,
2,
0,
0,
3,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
0,
3,
0,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
1,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
0,
1,
1,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
1,
2,
1,
0,
3,
1,
2,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
0,
1,
1,
2,
0,
0,
3,
1,
2,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
1,
1,
1,
3,
1,
1,
1,
1,
1,
1,
2,
1,
0,
2,
1,
2,
0,
0,
3,
0,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
0,
0,
1,
1,
1,
2,
0,
2,
3,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
1,
1,
2,
0,
1,
1,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
0,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
1,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
1,
2,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
2,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
2,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
1,
1,
2,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
1,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
0,
0,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
1,
2,
0,
0,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
0,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
0,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
2,
2,
1,
2,
1,
0,
0,
1,
2,
0,
0,
0,
1,
2,
0,
0,
3,
1,
1,
0,
1,
1,
1,
0,
0,
2,
1,
0,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
3,
1,
0,
0,
0,
3,
1,
0,
1,
2,
0,
1,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
1,
1,
3,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
0,
2,
0,
1,
1,
1,
2,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
0,
2,
1,
2,
2,
1,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
0,
1,
0,
0,
0,
3,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
0,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
3,
0,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
1,
0,
2,
0,
1,
0,
0,
0,
3,
1,
1,
0,
0,
3,
1,
1,
1,
1,
3,
1,
1,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
3,
0,
2,
0,
0,
2,
1,
0,
1,
0,
3,
0,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
1,
2,
0,
0,
1,
1,
1,
3,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
2,
3,
1,
2,
1,
1,
3,
0,
1,
0,
2,
2,
1,
1,
1,
2,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
3,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
0,
1,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
0,
0,
1,
0,
2,
2,
1,
0,
0,
1,
3,
1,
1,
1,
2,
3,
0,
1,
0,
1,
2,
1,
1,
1,
0,
2,
1,
0,
1,
0,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
3,
0,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
0,
2,
0,
1,
2,
1,
0,
0,
1,
3,
0,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
1,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
1,
0,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
0,
1,
1,
0,
0,
2,
1,
1,
1,
2,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
1,
0,
1,
0,
1,
1,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
1,
1,
2,
1,
1,
3,
0,
0,
1,
0,
0,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
2,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
1,
1,
2,
1,
1,
2,
1,
1,
0,
2,
3,
1,
2,
1,
1,
3,
0,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
1,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
1,
0,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
1,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
1,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
0,
2,
0,
2,
2,
1,
0,
1,
0,
1,
1,
1,
0,
1,
2,
1,
1,
0,
2,
1,
0,
2,
0,
2,
2,
1,
2,
1,
1,
1,
1,
1,
0,
2,
0,
0,
2,
0,
0,
1,
1,
0,
0,
0,
0,
1,
2,
0,
0,
3,
1,
2,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
2,
0,
0,
0,
1,
2,
1,
1,
0,
0,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
0,
1,
1,
0,
2,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
3,
1,
2,
0,
2,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
0,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
0,
2,
3,
1,
0,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
1,
1,
0,
2,
0,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
0,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
0,
1,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
0,
1,
0,
2,
0,
1,
1,
0,
2,
2,
1,
0,
1,
2,
0,
0,
0,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
1,
1,
3,
1,
2,
1,
0,
3,
0,
2,
0,
0,
2,
0,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
1,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
1,
2,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
3,
0,
0,
0,
1,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
2,
0,
0,
0,
0,
0,
1,
2,
0,
1,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
1,
0,
2,
2,
0,
2,
0,
1,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
2,
0,
0,
0,
0,
0,
0,
0,
1,
1,
2,
0,
1,
2,
0,
2,
1,
2,
2,
1,
2,
1,
0,
2,
1,
1,
0,
0,
0,
1,
2,
1,
1,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
0,
2,
0,
2,
1,
1,
2,
1,
1,
3,
1,
1,
1,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
0,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
1,
1,
0,
0,
0,
3,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
1,
2,
2,
0,
2,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
0,
0,
2,
1,
1,
0,
2,
0,
1,
0,
0,
1,
3,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
2,
0,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
0,
3,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
0,
1,
1,
1,
3,
0,
2,
0,
2,
3,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
1,
1,
3,
1,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
2,
1,
2,
1,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
2,
0,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
1,
0,
0,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
3,
1,
1,
0,
1,
1,
1,
2,
1,
1,
1,
0,
0,
1,
0,
1,
0,
2,
0,
1,
0,
1,
1,
0,
1,
3,
0,
2,
0,
0,
0,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
1,
0,
0,
3,
0,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
2,
1,
0,
1,
1,
1,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
3,
1,
1,
0,
2,
1,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
1,
0,
0,
0,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
1,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
1,
2,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
3,
1,
1,
0,
2,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
0,
0,
1,
0,
2,
0,
1,
0,
1,
2,
1,
2,
3,
0,
1,
0,
1,
1,
1,
1,
0,
1,
1,
1,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
2,
3,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
1,
2,
3,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
0,
0,
3,
0,
1,
0,
1,
2,
1,
1,
1,
1,
1,
1,
0,
0,
0,
2,
0,
2,
0,
1,
0,
1,
2,
0,
2,
0,
0,
0,
0,
1,
0,
1,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
0,
0,
2,
2,
1,
1,
0,
2,
1,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
1,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
0,
0,
2,
0,
1,
1,
0,
2,
1,
0,
0,
0,
1,
3,
1,
1,
0,
1,
0,
0,
0,
0,
0,
3,
1,
0,
0,
0,
3,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
0,
2,
1,
1,
0,
0,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
0,
2,
3,
0,
0,
0,
1,
3,
1,
2,
0,
0,
0,
1,
1,
1,
1,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
0,
1,
0,
0,
0,
1,
0,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
2,
1,
1,
1,
0,
2,
0,
0,
2,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
1,
0,
1,
1,
2,
0,
1,
0,
0,
2,
1,
2,
0,
0,
1,
1,
1,
0,
2,
0,
1,
1,
0,
0,
2,
1,
2,
1,
2,
2,
0,
0,
1,
2,
3,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
2,
2,
1,
1,
0,
0,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
3,
0,
0,
1,
0,
1,
1,
2,
1,
2,
2,
1,
2,
0,
0,
2,
0,
2,
1,
2,
2,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
0,
1,
1,
1,
1,
1,
1,
0,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
1,
2,
2,
1,
0,
0,
1,
0,
0,
2,
1,
1,
2,
1,
1,
0,
0,
0,
1,
2,
1,
2,
3,
0,
1,
0,
0,
0,
1,
2,
1,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
1,
2,
3,
1,
2,
0,
2,
1,
1,
1,
0,
2,
1,
0,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
1,
1,
2,
1,
0,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
1,
0,
2,
1,
1,
0,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
0,
2,
0,
2,
0,
1,
1,
0,
1,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
3,
1,
2,
0,
2,
3,
1,
1,
0,
2,
1,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
0,
0,
0,
0,
0,
1,
0,
1,
1,
3,
1,
2,
1,
1,
2,
0,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
1,
1,
2,
0,
0,
0,
0,
2,
0,
2,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
0,
0,
1,
2,
0,
1,
1,
0,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
1,
1,
1,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
3,
0,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
1,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
0,
2,
1,
1,
1,
0,
1,
0,
2,
0,
0,
0,
0,
0,
0,
1,
2,
1,
0,
2,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
0,
1,
1,
3,
1,
2,
0,
1,
0,
1,
1,
0,
0,
1,
0,
2,
1,
2,
1,
0,
1,
0,
0,
0,
1,
2,
1,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
0,
1,
2,
0,
2,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
3,
1,
2,
1,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
0,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
0,
0,
2,
0,
2,
0,
0,
1,
0,
2,
0,
0,
2,
0,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
2,
1,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
1,
1,
2,
1,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
1,
1,
0,
0,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
1,
1,
3,
0,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
1,
3,
1,
2,
1,
1,
1,
0,
0,
0,
0,
1,
1,
2,
1,
1,
2,
1,
0,
1,
0,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
1,
2,
1,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
1,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
0,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
1,
1,
1,
0,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
1,
1,
2,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
3,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
0,
2,
0,
2,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
2,
0,
0,
1,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
3,
1,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
1,
0,
2,
0,
1,
1,
1,
1,
0,
2,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
0,
1,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
0,
0,
1,
0,
0,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
1,
1,
0,
3,
0,
2,
0,
1,
1,
0,
2,
0,
1,
0,
1,
1,
1,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
3,
1,
0,
1,
0,
2,
1,
2,
0,
2,
3,
1,
1,
0,
2,
3,
1,
2,
0,
2,
3,
1,
0,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
2,
3,
1,
2,
0,
0,
3,
0,
0,
0,
0,
0,
1,
1,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
1,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
0,
1,
0,
3,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
0,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
3,
0,
2,
0,
0,
1,
0,
2,
1,
1,
1,
1,
1,
0,
1,
3,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
1,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
0,
2,
1,
1,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
1,
1,
1,
1,
0,
3,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
2,
1,
1,
1,
2,
1,
0,
0,
0,
0,
0,
0,
1,
0,
2,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
1,
2,
1,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
0,
0,
0,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
2,
1,
1,
1,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
1,
1,
1,
0,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
1,
0,
0,
1,
1,
1,
1,
2,
2,
1,
0,
1,
0,
2,
0,
2,
1,
2,
2,
1,
2,
1,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
0,
2,
1,
0,
1,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
1,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
1,
1,
2,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
3,
0,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
1,
0,
0,
3,
0,
2,
1,
1,
2,
0,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
1,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
0,
0,
2,
0,
2,
1,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
0,
2,
1,
1,
2,
0,
1,
3,
1,
1,
0,
2,
0,
1,
0,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
1,
0,
1,
0,
1,
2,
0,
2,
0,
2,
0,
1,
1,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
3,
1,
1,
0,
0,
2,
1,
2,
0,
0,
0,
0,
1,
1,
1,
1,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
0,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
1,
1,
2,
0,
2,
1,
1,
1,
1,
1,
2,
0,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
0,
1,
0,
2,
2,
1,
0,
0,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
2,
1,
0,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
0,
0,
0,
2,
0,
0,
0,
0,
1,
0,
1,
3,
1,
2,
1,
0,
0,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
0,
1,
1,
3,
1,
0,
0,
0,
1,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
0,
2,
1,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
1,
0,
3,
0,
2,
0,
2,
0,
0,
0,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
1,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
2,
1,
0,
1,
0,
1,
3,
1,
1,
1,
1,
0,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
1,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
2,
0,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
0,
0,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
0,
0,
1,
3,
1,
2,
1,
2,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
0,
1,
0,
0,
0,
0,
1,
1,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
0,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
2,
2,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
0,
0,
2,
0,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
2,
1,
0,
0,
2,
0,
1,
2,
1,
1,
3,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
1,
0,
0,
0,
2,
2,
1,
2,
0,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
1,
1,
0,
0,
2,
0,
0,
2,
1,
2,
3,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
1,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
0,
2,
1,
1,
3,
0,
0,
0,
1,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
2,
1,
2,
3,
0,
1,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
1,
1,
1,
0,
0,
1,
0,
2,
1,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
0,
1,
0,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
0,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
1,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
0,
1,
1,
2,
1,
0,
0,
2,
1,
1,
2,
0,
2,
0,
0,
2,
0,
1,
0,
0,
2,
0,
1,
3,
0,
0,
0,
1,
0,
0,
1,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
1,
0,
1,
3,
1,
2,
1,
2,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
2,
1,
1,
1,
0,
1,
0,
0,
0,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
0,
0,
0,
0,
1,
0,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
0,
2,
0,
1,
2,
1,
2,
3,
1,
0,
0,
1,
0,
1,
0,
0,
0,
3,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
0,
0,
1,
0,
1,
2,
1,
0,
1,
2,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
1,
1,
2,
0,
0,
0,
1,
2,
1,
2,
1,
0,
0,
0,
0,
2,
1,
1,
0,
0,
1,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
0,
2,
3,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
1,
2,
1,
1,
1,
0,
0,
0,
0,
1,
0,
2,
2,
1,
2,
1,
0,
2,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
0,
2,
1,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
0,
1,
1,
1,
0,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
1,
1,
1,
1,
0,
0,
0,
3,
0,
0,
1,
1,
0,
1,
0,
0,
1,
2,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
0,
0,
1,
1,
1,
2,
1,
0,
2,
0,
2,
0,
1,
2,
1,
2,
1,
1,
1,
0,
2,
0,
0,
3,
1,
0,
0,
2,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
1,
1,
3,
0,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
3,
1,
0,
0,
0,
2,
1,
2,
1,
1,
1,
1,
0,
0,
1,
2,
0,
2,
0,
2,
0,
0,
2,
1,
1,
2,
1,
1,
1,
2,
2,
1,
1,
0,
1,
0,
0,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
1,
1,
3,
1,
0,
1,
1,
1,
1,
2,
0,
2,
0,
1,
0,
0,
0,
1,
1,
2,
0,
2,
0,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
2,
3,
1,
2,
1,
1,
1,
1,
1,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
0,
1,
0,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
0,
1,
0,
1,
0,
1,
0,
1,
1,
3,
1,
1,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
2,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
0,
1,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
1,
0,
3,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
1,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
1,
0,
0,
1,
2,
0,
2,
0,
0,
0,
0,
0,
0,
1,
3,
1,
2,
1,
2,
0,
1,
1,
1,
1,
3,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
0,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
1,
0,
1,
0,
1,
1,
2,
1,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
0,
0,
1,
1,
1,
2,
3,
1,
1,
0,
0,
3,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
1,
1,
0,
0,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
0,
0,
2,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
1,
0,
2,
1,
1,
0,
2,
2,
1,
1,
1,
2,
1,
0,
0,
0,
0,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
2,
0,
2,
1,
1,
2,
1,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
3,
1,
2,
0,
2,
2,
1,
0,
1,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
0,
1,
0,
1,
2,
3,
0,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
2,
3,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
0,
0,
1,
1,
1,
0,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
1,
0,
0,
1,
1,
1,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
1,
0,
2,
0,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
3,
1,
0,
0,
2,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
0,
2,
0,
2,
0,
0,
0,
0,
0,
2,
0,
2,
0,
1,
1,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
3,
0,
1,
1,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
0,
2,
0,
1,
0,
0,
1,
0,
1,
1,
1,
2,
1,
1,
1,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
0,
0,
0,
1,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
0,
0,
1,
3,
1,
2,
0,
2,
3,
1,
2,
0,
1,
1,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
0,
2,
1,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
1,
3,
1,
1,
0,
2,
0,
0,
2,
0,
1,
3,
0,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
0,
2,
0,
1,
1,
0,
2,
3,
1,
1,
1,
1,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
1,
2,
1,
0,
1,
0,
1,
1,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
0,
1,
1,
1,
3,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
0,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
3,
1,
1,
0,
2,
3,
1,
0,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
1,
1,
1,
1,
2,
1,
1,
2,
1,
1,
0,
0,
3,
1,
1,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
0,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
1,
2,
2,
1,
2,
1,
1,
2,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
1,
2,
1,
2,
2,
1,
0,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
0,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
2,
1,
0,
0,
1,
2,
0,
0,
1,
0,
1,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
2,
0,
1,
0,
1,
1,
2,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
1,
2,
1,
1,
2,
0,
1,
0,
0,
1,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
0,
1,
0,
2,
1,
0,
0,
2,
1,
0,
0,
0,
0,
2,
0,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
2,
0,
1,
0,
0,
2,
0,
0,
1,
1,
2,
1,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
0,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
1,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
2,
2,
1,
0,
0,
1,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
0,
0,
0,
2,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
0,
0,
1,
1,
1,
0,
1,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
0,
2,
0,
2,
1,
1,
1,
1,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
0,
0,
1,
2,
0,
2,
3,
1,
1,
0,
1,
3,
1,
0,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
0,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
0,
0,
0,
0,
1,
1,
2,
0,
2,
3,
1,
0,
0,
2,
0,
0,
2,
1,
1,
0,
0,
0,
1,
0,
3,
0,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
0,
3,
1,
0,
1,
0,
1,
1,
2,
0,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
1,
2,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
1,
1,
1,
0,
1,
0,
1,
3,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
2,
0,
1,
2,
0,
1,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
3,
1,
2,
0,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
1,
1,
0,
0,
0,
0,
1,
1,
2,
1,
0,
1,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
1,
1,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
3,
0,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
0,
2,
0,
1,
0,
0,
1,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
1,
0,
1,
1,
0,
1,
1,
2,
1,
2,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
1,
0,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
1,
0,
1,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
0,
2,
0,
0,
0,
0,
0,
2,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
1,
2,
1,
0,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
0,
2,
1,
1,
3,
1,
1,
0,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
0,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
0,
0,
2,
0,
2,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
1,
0,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
0,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
1,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
2,
0,
1,
3,
0,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
2,
1,
0,
0,
2,
3,
1,
0,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
1,
2,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
3,
0,
2,
0,
2,
1,
0,
2,
0,
1,
0,
0,
2,
1,
1,
3,
1,
2,
0,
1,
2,
0,
2,
1,
2,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
0,
2,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
1,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
1,
0,
1,
2,
1,
2,
1,
1,
0,
0,
1,
2,
1,
2,
1,
1,
1,
0,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
1,
1,
2,
0,
0,
2,
1,
0,
1,
0,
0,
1,
1,
1,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
0,
3,
0,
0,
0,
0,
1,
1,
2,
0,
0,
3,
1,
1,
0,
0,
3,
0,
2,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
0,
0,
1,
2,
2,
1,
1,
0,
1,
2,
0,
1,
0,
2,
2,
1,
2,
0,
0,
2,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
2,
0,
0,
1,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
0,
0,
1,
1,
1,
0,
0,
1,
2,
1,
2,
1,
2,
2,
1,
0,
0,
1,
0,
1,
1,
0,
0,
2,
1,
0,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
0,
2,
0,
1,
3,
1,
2,
1,
0,
1,
1,
0,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
1,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
2,
3,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
0,
1,
0,
2,
1,
1,
0,
2,
2,
1,
2,
1,
2,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
3,
1,
0,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
2,
0,
0,
3,
1,
1,
0,
2,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
1,
1,
2,
2,
1,
1,
0,
2,
0,
0,
2,
1,
1,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
2,
0,
1,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
2,
0,
0,
0,
1,
1,
0,
2,
2,
1,
2,
1,
2,
2,
1,
1,
1,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
1,
2,
0,
0,
0,
0,
2,
0,
2,
0,
2,
3,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
3,
1,
1,
0,
0,
0,
1,
1,
0,
2,
3,
1,
2,
1,
1,
0,
0,
2,
0,
1,
1,
1,
2,
0,
2,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
0,
0,
1,
0,
0,
2,
1,
1,
3,
0,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
0,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
0,
0,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
2,
1,
2,
1,
1,
0,
1,
0,
0,
2,
2,
1,
2,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
1,
1,
2,
1,
2,
0,
2,
0,
0,
2,
1,
1,
0,
0,
1,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
2,
0,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
1,
1,
0,
0,
1,
2,
0,
0,
0,
2,
0,
0,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
0,
1,
0,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
3,
0,
1,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
0,
0,
0,
1,
1,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
2,
1,
0,
0,
2,
0,
0,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
0,
1,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
1,
1,
1,
0,
1,
3,
1,
0,
0,
2,
0,
1,
1,
0,
1,
0,
0,
2,
0,
0,
1,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
1,
2,
0,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
1,
1,
1,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
1,
0,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
3,
1,
2,
0,
0,
0,
1,
1,
1,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
3,
1,
0,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
0,
1,
1,
2,
0,
0,
0,
0,
1,
0,
1,
1,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
0,
1,
0,
1,
0,
1,
0,
0,
0,
3,
1,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
1,
1,
0,
2,
1,
0,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
1,
0,
2,
0,
0,
0,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
2,
3,
1,
1,
1,
2,
2,
1,
2,
0,
1,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
2,
0,
1,
0,
0,
0,
1,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
3,
1,
0,
0,
0,
2,
1,
1,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
0,
0,
2,
1,
2,
1,
1,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
1,
1,
2,
0,
0,
2,
1,
1,
0,
0,
1,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
2,
0,
2,
3,
1,
0,
1,
1,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
1,
1,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
3,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
1,
0,
2,
1,
1,
1,
0,
1,
0,
0,
3,
1,
1,
0,
1,
3,
1,
0,
0,
2,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
0,
2,
0,
0,
2,
1,
2,
0,
0,
2,
0,
0,
1,
0,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
0,
0,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
3,
1,
1,
0,
1,
1,
1,
2,
0,
0,
2,
1,
1,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
0,
1,
0,
0,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
0,
3,
1,
2,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
3,
1,
0,
0,
1,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
1,
1,
2,
0,
2,
0,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
0,
2,
1,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
1,
0,
0,
1,
0,
2,
0,
1,
3,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
0,
2,
0,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
2,
0,
2,
0,
0,
0,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
1,
1,
1,
0,
2,
1,
1,
2,
1,
2,
1,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
0,
1,
0,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
1,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
0,
0,
1,
3,
1,
2,
0,
2,
1,
0,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
1,
0,
0,
1,
0,
3,
0,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
3,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
1,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
1,
0,
0,
3,
0,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
0,
1,
3,
1,
0,
0,
1,
2,
1,
0,
1,
0,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
3,
1,
1,
0,
2,
1,
0,
2,
0,
0,
0,
1,
2,
0,
2,
0,
0,
0,
0,
0,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
0,
1,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
3,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
1,
1,
2,
0,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
1,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
0,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
2,
1,
1,
1,
2,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
0,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
0,
2,
0,
2,
0,
1,
1,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
1,
1,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
1,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
0,
2,
0,
2,
3,
1,
2,
0,
1,
1,
1,
1,
0,
1,
3,
1,
0,
0,
0,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
0,
2,
0,
1,
3,
0,
2,
0,
1,
0,
0,
2,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
1,
1,
1,
1,
0,
2,
1,
1,
0,
1,
1,
0,
0,
0,
0,
3,
1,
2,
0,
0,
3,
1,
2,
1,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
1,
0,
2,
1,
1,
0,
2,
0,
0,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
0,
2,
1,
2,
2,
0,
1,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
1,
2,
2,
1,
2,
0,
2,
0,
0,
1,
0,
2,
0,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
0,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
1,
2,
1,
1,
1,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
0,
1,
1,
1,
3,
0,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
0,
1,
0,
3,
1,
0,
0,
2,
1,
1,
2,
0,
2,
2,
1,
0,
1,
0,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
0,
1,
0,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
1,
1,
0,
2,
0,
2,
0,
1,
0,
0,
0,
0,
1,
1,
1,
2,
3,
1,
0,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
1,
1,
3,
0,
1,
0,
2,
1,
1,
2,
0,
1,
1,
1,
1,
0,
2,
3,
1,
1,
0,
2,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
0,
2,
0,
2,
3,
0,
1,
1,
1,
3,
1,
1,
1,
0,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
0,
1,
0,
1,
2,
1,
1,
1,
2,
1,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
1,
1,
1,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
0,
0,
1,
0,
0,
2,
1,
1,
3,
0,
2,
0,
0,
3,
1,
0,
0,
0,
2,
1,
0,
0,
0,
3,
0,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
0,
1,
2,
1,
2,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
0,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
0,
1,
1,
2,
2,
1,
2,
0,
1,
0,
1,
0,
1,
1,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
2,
0,
2,
2,
0,
2,
0,
1,
3,
1,
0,
0,
2,
3,
1,
1,
0,
1,
2,
0,
1,
0,
2,
2,
1,
0,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
1,
1,
1,
0,
0,
2,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
1,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
2,
0,
0,
2,
0,
2,
3,
1,
1,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
1,
2,
1,
0,
2,
0,
0,
0,
0,
1,
0,
2,
0,
1,
0,
0,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
0,
0,
1,
0,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
1,
1,
0,
2,
1,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
0,
2,
0,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
0,
1,
1,
0,
2,
0,
0,
0,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
0,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
2,
0,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
1,
2,
1,
2,
1,
1,
3,
0,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
0,
0,
1,
1,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
1,
1,
0,
2,
1,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
0,
0,
2,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
1,
2,
1,
1,
1,
2,
0,
1,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
0,
1,
1,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
1,
3,
0,
0,
1,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
0,
1,
1,
1,
1,
1,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
3,
1,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
3,
0,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
0,
2,
0,
1,
3,
0,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
0,
1,
2,
0,
0,
3,
1,
2,
0,
2,
2,
1,
1,
0,
0,
1,
1,
2,
1,
1,
1,
0,
0,
1,
0,
2,
1,
1,
1,
1,
2,
1,
0,
1,
0,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
0,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
0,
1,
1,
3,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
0,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
1,
2,
3,
1,
2,
0,
1,
1,
1,
0,
0,
0,
0,
0,
2,
0,
0,
3,
1,
2,
0,
2,
3,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
2,
3,
0,
1,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
1,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
1,
0,
0,
2,
3,
1,
2,
0,
2,
0,
1,
0,
0,
1,
0,
0,
1,
1,
2,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
0,
1,
1,
0,
2,
1,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
1,
0,
0,
2,
0,
1,
3,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
0,
0,
0,
3,
1,
1,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
0,
2,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
2,
1,
0,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
1,
1,
1,
0,
2,
0,
1,
0,
0,
1,
0,
0,
2,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
2,
0,
2,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
1,
1,
0,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
1,
1,
1,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
2,
0,
1,
0,
1,
0,
0,
0,
3,
1,
2,
1,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
0,
3,
1,
1,
0,
0,
3,
1,
2,
0,
2,
2,
0,
1,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
1,
1,
1,
0,
1,
2,
1,
2,
1,
2,
1,
1,
2,
1,
2,
2,
0,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
2,
1,
1,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
2,
0,
2,
2,
1,
1,
0,
2,
0,
0,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
0,
0,
0,
0,
2,
2,
0,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
1,
0,
2,
2,
1,
0,
1,
1,
2,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
1,
1,
2,
2,
1,
2,
1,
1,
1,
0,
1,
1,
2,
3,
0,
0,
1,
2,
0,
1,
0,
0,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
0,
2,
1,
1,
0,
2,
2,
0,
1,
0,
1,
3,
0,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
0,
3,
1,
0,
0,
0,
3,
0,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
1,
1,
1,
2,
2,
1,
0,
1,
1,
1,
0,
2,
1,
1,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
0,
0,
2,
0,
2,
1,
1,
2,
0,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
2,
0,
1,
0,
0,
0,
2,
1,
0,
0,
1,
3,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
1,
1,
0,
0,
3,
1,
2,
0,
0,
3,
0,
0,
0,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
0,
1,
0,
0,
2,
2,
1,
1,
0,
1,
1,
0,
2,
0,
2,
2,
1,
2,
1,
0,
0,
1,
2,
1,
1,
2,
1,
2,
1,
2,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
3,
1,
0,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
0,
0,
0,
1,
3,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
3,
1,
2,
1,
0,
2,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
0,
2,
3,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
1,
1,
0,
0,
0,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
0,
1,
1,
2,
0,
0,
0,
0,
0,
1,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
1,
0,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
3,
1,
1,
0,
0,
2,
1,
2,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
3,
1,
2,
0,
2,
0,
1,
2,
0,
2,
2,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
1,
1,
0,
1,
0,
1,
0,
1,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
2,
0,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
0,
1,
2,
1,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
1,
2,
0,
1,
3,
1,
2,
0,
2,
1,
1,
0,
1,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
0,
2,
1,
2,
1,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
0,
2,
1,
1,
1,
1,
2,
0,
0,
3,
1,
2,
0,
1,
1,
1,
0,
1,
1,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
0,
0,
2,
0,
0,
0,
1,
1,
0,
2,
0,
0,
1,
0,
0,
0,
1,
2,
0,
0,
0,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
0,
2,
0,
1,
0,
1,
1,
1,
1,
0,
0,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
0,
1,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
0,
1,
0,
2,
1,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
3,
1,
1,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
1,
2,
1,
1,
2,
0,
0,
1,
1,
1,
0,
2,
1,
0,
1,
0,
2,
3,
1,
1,
0,
2,
3,
0,
2,
0,
1,
3,
1,
1,
0,
0,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
0,
2,
0,
0,
0,
0,
2,
0,
1,
0,
0,
1,
0,
0,
3,
1,
2,
0,
2,
1,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
0,
1,
1,
2,
1,
1,
0,
0,
0,
0,
2,
1,
1,
2,
1,
2,
0,
2,
1,
0,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
1,
2,
2,
0,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
1,
2,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
0,
2,
0,
1,
2,
1,
0,
0,
2,
3,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
0,
0,
0,
0,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
3,
1,
1,
1,
1,
1,
1,
2,
1,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
0,
0,
1,
0,
0,
2,
1,
2,
0,
0,
0,
1,
1,
0,
0,
2,
0,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
3,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
2,
1,
0,
1,
0,
0,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
0,
2,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
2,
0,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
0,
0,
0,
2,
1,
0,
0,
0,
1,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
0,
0,
0,
1,
0,
0,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
1,
1,
3,
1,
2,
0,
2,
2,
1,
2,
1,
1,
1,
1,
2,
0,
1,
3,
1,
2,
1,
1,
3,
0,
2,
0,
0,
0,
1,
1,
0,
2,
0,
1,
2,
1,
1,
3,
0,
1,
0,
1,
2,
1,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
1,
1,
2,
0,
1,
0,
0,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
1,
0,
1,
0,
0,
2,
1,
1,
3,
0,
0,
1,
1,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
0,
2,
1,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
2,
0,
1,
2,
0,
2,
0,
2,
2,
1,
2,
0,
0,
3,
1,
1,
0,
0,
3,
1,
2,
1,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
3,
1,
0,
0,
0,
1,
0,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
2,
0,
0,
1,
1,
1,
2,
0,
1,
0,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
0,
0,
0,
1,
1,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
0,
1,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
0,
1,
0,
3,
0,
2,
0,
2,
2,
1,
2,
0,
1,
3,
0,
1,
0,
2,
1,
1,
0,
0,
1,
0,
0,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
1,
0,
1,
3,
1,
0,
1,
0,
2,
1,
2,
0,
1,
1,
0,
2,
0,
1,
2,
0,
2,
0,
0,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
1,
0,
1,
3,
1,
1,
0,
2,
0,
0,
2,
1,
0,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
2,
0,
2,
2,
1,
2,
0,
2,
3,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
0,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
1,
2,
3,
1,
1,
1,
1,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
3,
1,
2,
0,
2,
0,
0,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
0,
2,
0,
2,
0,
0,
0,
1,
1,
0,
1,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
1,
1,
0,
1,
2,
0,
1,
2,
0,
1,
0,
2,
3,
1,
2,
0,
0,
3,
1,
1,
0,
0,
0,
0,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
0,
1,
0,
1,
0,
2,
0,
2,
0,
1,
1,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
1,
0,
2,
1,
2,
1,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
2,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
0,
1,
0,
1,
0,
1,
2,
0,
2,
3,
0,
1,
1,
1,
2,
1,
1,
1,
2,
1,
0,
0,
0,
2,
3,
1,
2,
0,
1,
1,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
0,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
0,
0,
0,
1,
0,
2,
1,
1,
0,
0,
0,
0,
2,
0,
2,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
1,
1,
0,
1,
0,
2,
1,
0,
0,
1,
3,
1,
1,
1,
1,
2,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
2,
1,
0,
1,
1,
0,
1,
0,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
2,
3,
1,
2,
0,
0,
2,
0,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
0,
1,
2,
0,
0,
3,
1,
0,
0,
0,
2,
1,
1,
0,
2,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
2,
1,
1,
0,
1,
0,
1,
2,
1,
1,
1,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
0,
2,
0,
1,
0,
1,
0,
1,
0,
2,
1,
0,
1,
0,
3,
0,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
1,
0,
1,
1,
1,
2,
1,
1,
3,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
2,
0,
0,
3,
1,
2,
0,
0,
3,
1,
2,
0,
0,
1,
0,
2,
1,
1,
2,
1,
2,
1,
0,
2,
1,
2,
1,
2,
2,
1,
2,
1,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
3,
1,
0,
0,
1,
3,
0,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
1,
1,
1,
3,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
1,
0,
0,
2,
0,
1,
1,
1,
2,
1,
1,
0,
1,
1,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
0,
0,
0,
2,
0,
2,
0,
2,
2,
1,
2,
1,
1,
2,
1,
1,
0,
2,
0,
0,
2,
0,
0,
3,
1,
1,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
1,
1,
0,
0,
3,
0,
2,
0,
1,
2,
1,
2,
0,
0,
0,
1,
0,
1,
1,
2,
1,
0,
1,
1,
2,
1,
0,
1,
2,
1,
1,
1,
0,
1,
0,
0,
1,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
0,
0,
0,
2,
0,
1,
1,
1,
2,
0,
2,
3,
0,
2,
0,
2,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
0,
2,
1,
1,
0,
0,
2,
0,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
0,
1,
2,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
1,
1,
1,
1,
1,
1,
2,
3,
0,
2,
0,
2,
2,
1,
2,
0,
1,
3,
0,
1,
0,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
1,
2,
3,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
0,
0,
0,
0,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
2,
1,
1,
0,
2,
1,
1,
1,
0,
0,
2,
1,
2,
1,
1,
3,
1,
0,
1,
0,
2,
0,
1,
1,
2,
2,
1,
0,
1,
1,
2,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
0,
0,
0,
2,
0,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
0,
0,
0,
1,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
1,
1,
1,
3,
1,
0,
0,
0,
2,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
0,
0,
0,
0,
0,
2,
0,
1,
0,
1,
2,
0,
1,
0,
0,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
0,
1,
0,
0,
0,
0,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
2,
0,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
0,
1,
1,
3,
1,
0,
0,
1,
1,
1,
2,
1,
1,
0,
0,
2,
0,
2,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
3,
1,
2,
0,
2,
1,
1,
2,
0,
2,
2,
1,
2,
1,
1,
3,
1,
2,
1,
1,
2,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
0,
1,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
2,
2,
1,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
2,
1,
2,
1,
1,
3,
1,
2,
0,
0,
0,
0,
2,
0,
2,
3,
1,
2,
0,
2,
0,
0,
1,
0,
2,
0,
0,
2,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
0,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
0,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
0,
1,
0,
2,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
1,
1,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
1,
0,
0,
1,
1,
2,
1,
1,
3,
0,
0,
0,
0,
3,
1,
2,
0,
0,
2,
1,
1,
1,
1,
3,
1,
2,
1,
2,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
2,
0,
2,
2,
1,
0,
0,
2,
0,
1,
1,
0,
0,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
1,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
0,
0,
1,
1,
1,
3,
0,
2,
0,
1,
1,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
1,
0,
0,
2,
1,
2,
0,
2,
1,
1,
2,
0,
1,
2,
0,
2,
0,
2,
0,
0,
1,
0,
1,
1,
0,
2,
1,
1,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
0,
0,
0,
1,
1,
1,
0,
2,
1,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
1,
2,
1,
1,
3,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
0,
0,
0,
1,
3,
0,
2,
1,
1,
2,
1,
1,
1,
1,
2,
0,
2,
0,
1,
0,
0,
2,
0,
0,
3,
1,
0,
0,
0,
1,
1,
0,
0,
2,
0,
1,
2,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
0,
0,
1,
0,
1,
1,
1,
2,
1,
1,
2,
1,
0,
1,
2,
2,
1,
2,
0,
2,
0,
1,
2,
1,
1,
1,
1,
1,
1,
0,
1,
1,
2,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
1,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
2,
0,
1,
0,
2,
3,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
0,
1,
1,
0,
2,
2,
1,
1,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
2,
2,
1,
2,
0,
2,
2,
1,
2,
0,
2,
1,
1,
2,
1,
0,
2,
1,
1,
0,
1,
0,
0,
2,
0,
0,
2,
1,
0,
1,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
3,
1,
0,
0,
0,
0,
0,
1,
1,
0,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
1,
1,
0,
0,
0,
2,
0,
2,
1,
1,
1,
0,
2,
1,
1,
2,
1,
1,
1,
1,
1,
0,
0,
0,
2,
0,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
1,
1,
1,
2,
1,
1,
1,
1,
2,
1,
0,
1,
1,
3,
1,
2,
0,
0,
3,
1,
0,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
1,
0,
1,
0,
0,
3,
1,
1,
0,
1,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
3,
1,
2,
0,
1,
0,
0,
0,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
1,
1,
1,
0,
2,
1,
0,
2,
0,
1,
1,
1,
0,
0,
0,
1,
1,
1,
1,
2,
2,
0,
1,
1,
2,
2,
0,
2,
0,
1,
3,
1,
2,
0,
1,
2,
1,
1,
0,
2,
2,
0,
2,
0,
2,
0,
0,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
0,
0,
0,
1,
1,
1,
2,
1,
1,
1,
1,
3,
0,
0,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
0,
2,
0,
1,
1,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
1,
2,
0,
1,
2,
1,
1,
1,
0,
0,
0,
1,
0,
0,
1,
0,
1,
0,
1,
1,
1,
0,
0,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
2,
0,
1,
1,
1,
2,
3,
0,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
1,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
1,
1,
1,
1,
1,
0,
1,
0,
1,
2,
1,
0,
0,
2,
3,
1,
2,
1,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
2,
1,
0,
2,
1,
2,
0,
2,
3,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
1,
3,
1,
2,
1,
1,
3,
0,
2,
0,
1,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
1,
1,
0,
2,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
0,
0,
1,
0,
2,
2,
1,
1,
0,
0,
0,
0,
2,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
1,
2,
1,
0,
1,
1,
2,
1,
1,
0,
0,
1,
1,
2,
0,
1,
0,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
2,
3,
1,
1,
0,
0,
0,
1,
0,
0,
1,
2,
1,
2,
0,
1,
0,
0,
2,
1,
1,
1,
1,
2,
0,
2,
2,
1,
1,
0,
1,
1,
0,
1,
0,
1,
2,
1,
2,
0,
1,
3,
1,
2,
1,
2,
2,
1,
2,
0,
2,
3,
1,
2,
1,
1,
1,
0,
1,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
1,
0,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
0,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
0,
2,
0,
1,
2,
1,
2,
1,
1,
2,
1,
1,
1,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
1,
2,
1,
2,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
0,
1,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
1,
1,
2,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
1,
1,
2,
0,
2,
3,
1,
2,
1,
1,
3,
1,
1,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
0,
0,
0,
1,
0,
2,
0,
0,
1,
1,
1,
0,
0,
2,
1,
1,
2,
1,
0,
1,
0,
0,
0,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
0,
0,
2,
1,
2,
0,
1,
1,
1,
2,
0,
1,
0,
1,
1,
0,
2,
0,
0,
2,
0,
1,
3,
1,
1,
0,
1,
1,
1,
0,
0,
0,
2,
1,
1,
0,
0,
2,
1,
2,
0,
2,
1,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
1,
0,
1,
1,
0,
0,
2,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
1,
1,
1,
0,
1,
2,
1,
2,
0,
1,
1,
0,
0,
1,
1,
3,
1,
0,
1,
0,
0,
1,
1,
0,
2,
0,
1,
2,
0,
2,
3,
1,
0,
0,
1,
2,
1,
1,
1,
1,
2,
1,
2,
1,
1,
2,
1,
1,
0,
0,
2,
1,
2,
0,
1,
0,
0,
2,
0,
2,
3,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
3,
0,
2,
0,
1,
0,
0,
1,
0,
1,
0,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
0,
2,
1,
0,
0,
0,
1,
1,
0,
1,
2,
0,
1,
0,
0,
0,
3,
1,
2,
1,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
0,
0,
2,
1,
1,
1,
1,
0,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
2,
3,
1,
2,
1,
2,
2,
1,
1,
0,
2,
2,
1,
2,
0,
1,
0,
0,
2,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
2,
1,
1,
1,
0,
2,
0,
1,
0,
0,
1,
0,
2,
3,
1,
2,
0,
1,
3,
1,
2,
0,
0,
2,
1,
1,
1,
2,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
1,
1,
2,
0,
2,
1,
1,
2,
1,
0,
2,
1,
0,
1,
0,
2,
1,
2,
0,
1,
1,
1,
1,
1,
1,
3,
0,
1,
0,
1,
2,
1,
1,
0,
1,
2,
1,
1,
0,
0,
1,
1,
1,
0,
1,
2,
1,
2,
0,
2,
0,
0,
2,
0,
2,
2,
0,
0,
0,
0,
2,
1,
2,
1,
1,
3,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
2,
1,
0,
0,
2,
2,
1,
2,
0,
0,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
3,
1,
2,
0,
0,
1,
1,
1,
1,
1,
2,
1,
2,
0,
2,
3,
1,
2,
0,
2,
2,
1,
2,
0,
0,
0,
1,
1,
0,
1,
2,
1,
2,
0,
0,
0,
0,
1,
0,
2,
2,
1,
1,
0,
1,
2,
1,
1,
1,
1,
1,
0,
2,
0,
0,
3,
1,
2,
0,
2,
3,
1,
1,
0,
1,
0,
0,
1,
1,
1,
2,
1,
0,
0,
1,
2,
1,
2,
0,
1,
3,
0,
1,
1,
1,
3,
1,
2,
0,
0,
1,
0,
2,
0,
1,
2,
1,
1,
0,
1,
2,
1,
0,
0,
1,
2,
1,
1,
0,
1,
3,
1,
1,
0,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
2,
3,
0,
2,
0,
0,
0,
1,
2,
1,
1,
2,
1,
1,
0,
2,
0,
1,
2,
0,
1,
1,
0,
1,
0,
1,
2,
1,
2,
1,
1,
0,
1,
1,
0,
2,
2,
0,
2,
0,
0,
1,
1,
0,
1,
0,
2,
1,
2,
0,
1,
3,
0,
2,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
1,
1,
1,
0,
1,
2,
2,
1,
2,
0,
0,
0,
0,
1,
1,
1,
2,
1,
2,
0,
1,
3,
1,
2,
0,
1,
0,
0,
1,
1,
2,
0,
1,
2,
1,
0,
1,
0,
2,
0,
1,
2,
1,
0,
1,
0,
1,
0,
2,
0,
1,
1,
1,
0,
0,
0,
2,
1,
2,
0,
0,
0,
1,
0,
0,
2,
2,
1,
1,
0,
1,
2,
1,
2,
1,
1,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
0,
1,
1,
1,
1,
2,
1,
2,
0,
1,
2,
0,
0,
0,
2,
2,
1,
1,
1,
1,
1,
1,
0,
0,
0,
1,
1,
2,
1,
1,
2,
1,
2,
0,
1,
0,
1,
1,
0,
2,
2,
1,
2,
0,
0,
3,
1,
2,
1,
1,
1,
1,
1,
0,
0,
0,
1,
2,
0,
2,
2,
1,
2,
0,
1,
3,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
1,
2,
0,
2,
0,
1,
1,
0,
2,
3,
1,
2,
0,
1,
2,
1,
2,
0,
2,
2,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
1,
2,
1,
0,
0,
0,
0,
1,
2,
0,
1,
2,
1,
2,
0,
1,
3,
1,
0,
0,
0,
2,
1,
1,
0,
1,
3,
1,
1,
1,
0,
1,
1,
2,
0,
1,
2,
1,
2,
1,
2,
2,
1,
1,
0,
0,
2,
1,
1,
0,
2,
3,
1,
2,
1,
2,
2,
1,
2,
0,
1,
2,
1,
2,
0,
1,
2,
1,
2,
1,
2,
0,
0,
2,
0,
1,
3,
0,
0,
0,
2,
3,
1,
0,
0,
0,
2,
1,
1,
1,
1,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
0,
1,
0,
2,
3,
1,
1,
1,
1,
1,
0,
1,
1,
2,
2,
1,
2,
0,
1,
0,
0,
1,
0,
1,
0,
1,
2,
0,
1,
0,
1,
2,
0,
2,
2,
1,
0,
0,
0,
1,
1,
0,
0,
0,
3,
1,
1,
0,
0,
2,
1,
1,
0,
2,
3,
1,
1,
0,
1,
2,
1,
1,
0,
2,
0,
0,
1,
0,
2,
1,
0,
2,
0,
0,
2,
0,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
2,
0,
2,
3,
1,
1,
0,
0,
2,
0,
0,
1,
0,
1,
1,
2,
0,
1,
2,
1,
1,
1,
2,
2,
1,
2,
0,
1,
0,
1,
2,
0,
1,
1,
1,
1,
1,
2,
2,
1,
1,
0,
0,
2,
1,
2,
1,
2,
3,
1,
2,
0,
1,
2,
1,
0,
0,
0,
2,
1,
0,
0,
0,
2,
1,
2,
0,
2,
0,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
1,
0,
1,
2,
1,
2,
0,
2,
1,
1,
2,
0,
0,
2,
1,
2,
0,
2,
2,
1,
1,
0,
0,
2,
1,
2,
0,
0,
0,
1,
2,
0,
1,
0,
0,
1,
0,
1,
3,
1,
2,
0,
1,
0,
1,
0,
0,
0,
2,
1,
2,
0,
0,
2,
1,
1,
0,
1,
2,
0,
2,
1,
1,
2,
1,
2,
0,
2,
3,
1,
1,
0,
1,
3,
1,
1,
1,
0,
0,
1,
1,
1,
2,
2,
1,
1,
0,
1,
2,
1,
2,
0,
1,
0,
1,
2,
0,
0,
2,
1,
2,
1,
2,
3,
0,
1,
0,
2,
0,
1,
2,
0,
0,
3,
1,
0,
0,
0,
0,
1,
2,
0,
0,
2,
1,
2,
0,
2,
0,
1,
1,
0,
0,
0,
1,
2,
0,
1,
0,
1,
2,
0,
1,
3,
0,
1,
0,
0,
2,
1,
1,
0,
2,
0,
1,
2,
0,
2,
3,
1,
2,
0,
1,
3,
1,
1,
0,
1,
1,
1,
0,
1,
0,
2,
1,
1,
0,
2,
3,
1,
2,
0,
2,
0,
1,
2,
0,
2,
3,
1,
1,
0,
0,
0,
1,
1,
1,
2,
1,
0,
1,
0,
2,
2,
1,
1,
0,
2,
2,
0,
2,
0,
0,
2,
0,
2,
0,
2,
1,
1,
0,
0,
0,
2,
1,
2,
1,
1,
2,
1,
2,
0,
2,
0,
1,
1,
0,
1,
2,
1,
2,
0,
1,
2,
1,
]
dis_answer = [
0.355124470003,
1.24045554632e-05,
0.00571262073676,
0.775345672281,
0.0992360700386,
]
| 7.004505
| 67
| 0.143429
| 75,072
| 525,541
| 1.004023
| 0.000813
| 0.455197
| 0.412344
| 0.367978
| 0.995038
| 0.995038
| 0.994998
| 0.994945
| 0.994375
| 0.989532
| 0
| 0.499515
| 0.713758
| 525,541
| 75,028
| 68
| 7.004598
| 0.001536
| 0.000459
| 0
| 0.99976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
8e0ae546e784f3fff93f1ec2faf70b4eb57dbf81
| 42
|
py
|
Python
|
tests/test_main.py
|
cacrespo/pylexis
|
5f9b1b04e33b59e6b040023ffe8c1a657d38bdcc
|
[
"MIT"
] | 3
|
2022-01-08T20:20:06.000Z
|
2022-01-09T21:58:39.000Z
|
tests/test_main.py
|
cacrespo/pylexis
|
5f9b1b04e33b59e6b040023ffe8c1a657d38bdcc
|
[
"MIT"
] | 9
|
2021-12-30T13:04:21.000Z
|
2022-02-09T23:03:21.000Z
|
tests/test_main.py
|
cacrespo/pylexis
|
5f9b1b04e33b59e6b040023ffe8c1a657d38bdcc
|
[
"MIT"
] | null | null | null |
import pytest
def test_TODO():
pass
| 7
| 16
| 0.666667
| 6
| 42
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 42
| 5
| 17
| 8.4
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
6d0501d7e59420e878570ade083b97833c0755ea
| 48
|
py
|
Python
|
ufdl-speech-app/src/ufdl/speech_app/routers/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
ufdl-speech-app/src/ufdl/speech_app/routers/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | 85
|
2020-07-24T00:04:28.000Z
|
2022-02-10T10:35:15.000Z
|
ufdl-speech-app/src/ufdl/speech_app/routers/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
from ._UFDLSpeechRouter import UFDLSpeechRouter
| 24
| 47
| 0.895833
| 4
| 48
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b64d445c030c7b36364432b75f280e1a9a88940c
| 22,058
|
py
|
Python
|
models/cifarnet.py
|
CrispyHarder/deep-weight-prior
|
b87e61d6ad590c61b90e188ec86bfb956073be65
|
[
"MIT"
] | 41
|
2019-02-12T10:15:19.000Z
|
2021-02-14T00:04:47.000Z
|
models/cifarnet.py
|
CrispyHarder/deep-weight-prior
|
b87e61d6ad590c61b90e188ec86bfb956073be65
|
[
"MIT"
] | 1
|
2020-10-25T21:18:59.000Z
|
2020-10-27T23:20:34.000Z
|
models/cifarnet.py
|
CrispyHarder/deep-weight-prior
|
b87e61d6ad590c61b90e188ec86bfb956073be65
|
[
"MIT"
] | 8
|
2019-08-26T01:55:26.000Z
|
2021-01-23T22:18:35.000Z
|
import torch
from models import bayes
from torch import nn
from collections import OrderedDict
import utils
from torch import distributions as dist
import numpy as np
class Flatten(nn.Module):
    """Reshape a (N, ...) tensor to (N, -1); used before fully-connected layers."""

    def forward(self, x):
        # Keep the batch dimension, merge everything after it.
        batch = len(x)
        return x.view(batch, -1)
class CIFARNet(bayes._BayesNet):
    """CIFAR convnet whose four conv layers can individually be Bayesian.

    Every configuration shares the same architecture:

        conv(7) -> bn -> lrelu -> maxpool(2) -> conv(5) -> bn -> lrelu
        -> conv(5) -> bn -> lrelu -> conv(5) -> bn -> lrelu -> flatten

    followed by a 512 -> 512 -> n_classes classifier head.  The ``cfg``
    string only selects, per conv layer, whether a stochastic
    (variational) convolution or a plain ``nn.Conv2d`` is used, so the
    nine previously duplicated branches are collapsed into one table.
    """

    # Which of (conv1, conv2, conv3, conv4) is stochastic for each cfg.
    # The stochastic layer class depends on the cfg family: '*-mtrunca'
    # configs use bayes.MuTruncAlphaFFGConv2d, the rest bayes.BayesConv2d.
    _BAYES_MASKS = {
        'vanilla':           (False, False, False, False),
        'vanilla-do':        (False, False, False, False),
        'bayes':             (True, True, True, True),
        'bayes1110':         (True, True, True, False),
        'bayes1100':         (True, True, False, False),
        'bayes1000':         (True, False, False, False),
        'bayes-mtrunca':     (True, True, True, True),
        'bayes1100-mtrunca': (True, True, False, False),
        'bayes1000-mtrunca': (True, False, False, False),
    }

    def __init__(self, cfg, device=None, n_classes=10, do=(), k=1., vae_list=None, **kwargs):
        """Build the network.

        :param cfg: architecture key, one of ``_BAYES_MASKS``.
        :param device: optional torch device the model is moved to.
        :param n_classes: size of the output layer.
        :param do: unused; kept for interface compatibility (immutable
            default replaces the original mutable ``[]``).
        :param k: width multiplier for the conv channel counts.
        :param vae_list: unused here; kept for interface compatibility.
        :raises NotImplementedError: for an unknown ``cfg``.
        """
        super(CIFARNet, self).__init__(**kwargs)
        self.device = device
        self.cfg = cfg
        d1, d2, d3 = map(int, [128 * k, 256 * k, 512 * k])  # d3 kept for parity; unused
        if cfg not in self._BAYES_MASKS:
            raise NotImplementedError
        mask = self._BAYES_MASKS[cfg]
        bayes_conv = (bayes.MuTruncAlphaFFGConv2d if cfg.endswith('-mtrunca')
                      else bayes.BayesConv2d)

        def conv(stochastic, c_in, c_out, ksize):
            # One conv layer, stochastic or deterministic per the cfg mask.
            return (bayes_conv if stochastic else nn.Conv2d)(c_in, c_out, ksize)

        # Input: 3x32x32
        self.features = nn.Sequential(OrderedDict([
            ('conv1', conv(mask[0], 3, d1, 7)),    # d1 x 26 x 26
            ('bn1', nn.BatchNorm2d(d1)),
            ('relu1', nn.LeakyReLU()),
            ('maxpool', nn.MaxPool2d(2)),          # d1 x 13 x 13
            ('conv2', conv(mask[1], d1, d2, 5)),   # d2 x 9 x 9
            ('bn2', nn.BatchNorm2d(d2)),
            ('relu2', nn.LeakyReLU()),
            ('conv3', conv(mask[2], d2, d2, 5)),   # d2 x 5 x 5
            ('bn3', nn.BatchNorm2d(d2)),
            ('relu3', nn.LeakyReLU()),
            ('conv4', conv(mask[3], d2, 512, 5)),  # 512 x 1 x 1
            ('bn4', nn.BatchNorm2d(512)),
            ('relu4', nn.LeakyReLU()),
            ('flatten', Flatten()),                # 512
        ]))
        self.classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(512, 512)),
            ('bn1', nn.BatchNorm1d(512)),
            ('relu1', nn.LeakyReLU()),
            ('linear', nn.Linear(512, n_classes))
        ]))
        if self.device:
            self.to(self.device)

    def forward(self, input):
        """Run features then classifier; returns (batch, n_classes) scores."""
        return self.classifier(self.features(input))

    def weights_init(self, init_list, vae_list, flow_list=None, pretrained=None, filters_list=None):
        """Initialise conv/linear weights according to ``init_list``.

        ``init_list[i]`` selects the scheme for conv layer i: 'xavier'
        (default), 'vae' (sample from a VAE decoder), 'flow',
        'filters' (load filters from disk), 'recon' (VAE reconstruction
        of loaded filters), or a single 'pretrained' entry to copy a
        full checkpoint.
        """
        # Default init: xavier for weights/means, constant for log-scales.
        self.apply(utils.weight_init(module=nn.Conv2d, initf=nn.init.xavier_normal_))
        self.apply(utils.weight_init(module=nn.Linear, initf=nn.init.xavier_normal_))
        self.apply(utils.weight_init(module=bayes.LogScaleConv2d, initf=utils.const_init(-10.)))
        self.apply(utils.weight_init(module=bayes.LogScaleLinear, initf=utils.const_init(-10.)))
        if len(init_list) > 0 and init_list[0] == 'pretrained':
            assert len(init_list) == 1
            w_pretrained = torch.load(pretrained)
            for k, v in w_pretrained.items():
                if k in self.state_dict():
                    self.state_dict()[k].data.copy_(v)
                else:
                    # Bayesian layers store the weight under '<layer>.mean.weight'.
                    tokens = k.split('.')
                    self.state_dict()['.'.join(tokens[:2] + ['mean'] + tokens[-1:])].data.copy_(v)
            return
        convs = [self.features.conv1, self.features.conv2, self.features.conv3, self.features.conv4]
        for i, m in enumerate(convs):
            init = init_list[i] if i < len(init_list) else 'xavier'
            # For Bayesian layers initialise the variational mean.
            w = m.mean.weight if isinstance(m, bayes._Bayes) else m.weight
            if init == 'vae':
                vae_path = vae_list[i]
                vae = utils.load_vae(vae_path, device=self.device)
                z = torch.randn(w.size(0) * w.size(1), vae.encoder.z_dim, 1, 1).to(vae.device)
                x = vae.decode(z)[0]
                w.data = x.reshape(w.shape)
            elif init == 'flow':
                flow_path = flow_list[i]
                flow = utils.load_flow(flow_path, device=self.device)
                utils.flow_init(flow)(w)
            elif init == 'xavier':
                pass  # already applied above
            elif init == 'filters':
                # Sample N = out*in filters (with replacement via tiling).
                filters = np.load(filters_list[i])
                filters = np.concatenate([filters] * 10)
                N = np.prod(w.shape[:2])
                filters = filters[np.random.permutation(len(filters))[:N]]
                w.data = torch.from_numpy(filters.reshape(*w.shape)).to(self.device)
            elif init == 'recon':
                # Like 'filters', but pass them through a VAE first.
                filters = np.load(filters_list[i])
                filters = np.concatenate([filters] * 10)
                N = np.prod(w.shape[:2])
                filters = filters[np.random.permutation(len(filters))[:N]]
                vae_path = vae_list[i]
                vae = utils.load_vae(vae_path, device=self.device)
                filters = vae(torch.from_numpy(filters).to(self.device))[1][0]
                w.data = filters.reshape_as(w)
            else:
                raise NotImplementedError('no {} init'.format(init))

    def set_prior(self, prior_list, dwp_samples, vae_list, flow_list=None):
        """Attach a prior / KL term to each Bayesian conv layer.

        ``prior_list[i]`` selects the prior for conv layer i: 'vae'
        (deep-weight prior), 'flow', 'sn' (standard normal),
        'loguniform' (only valid for the '*-mtrunca' configs), or 'no'.
        Deterministic layers are skipped.
        """
        convs = [self.features.conv1, self.features.conv2, self.features.conv3, self.features.conv4]
        for i, m in enumerate(convs):
            if not isinstance(m, bayes._Bayes):
                continue
            if prior_list[i] == 'vae':
                vae = utils.load_vae(vae_list[i], self.device)
                vae = nn.DataParallel(vae)
                # The prior is fixed; its parameters must not be trained.
                for p in vae.parameters():
                    p.requires_grad = False
                m.kl_function = utils.kl_dwp(vae, n_tries=dwp_samples)
            elif prior_list[i] == 'flow':
                flow = utils.load_flow(flow_list[i], self.device)
                for p in flow.parameters():
                    p.requires_grad = False
                m.kl_function = utils.kl_flow(flow, n_tries=dwp_samples)
            elif prior_list[i] == 'sn':
                m.kl_function = utils.kl_normal
                m.prior = dist.Normal(torch.FloatTensor([0.]).to(self.device),
                                      torch.FloatTensor([1.]).to(self.device))
            elif prior_list[i] == 'loguniform':
                if self.cfg in ['bayes-mtrunca', 'bayes1100-mtrunca', 'bayes1000-mtrunca']:
                    m.kl_function = utils.kl_loguniform_with_trunc_alpha
                else:
                    raise NotImplementedError
            elif prior_list[i] == 'no':
                pass
            else:
                raise NotImplementedError
class CIFARNetNew(bayes._BayesNet):
    """Smaller CIFAR convnet (3 conv layers, no batch norm) with optionally
    Bayesian conv layers and a configurable classifier head.

    ``cfg`` picks, per conv layer, a stochastic or deterministic
    convolution (see ``_BAYES_MASKS``); its suffix picks the head:
    '-nofc' is a single linear layer, '-do' adds dropout, otherwise a
    two-layer MLP.  The previously duplicated cfg branches are collapsed
    into one table.
    """

    # Which of (conv1, conv2, conv3) is stochastic for each cfg.  The
    # stochastic layer class is bayes.MuTruncAlphaFFGConv2d for the
    # '*-mutrunca' config and bayes.BayesConv2d otherwise.
    _BAYES_MASKS = {
        'vanilla':           (False, False, False),
        'vanilla-nofc':      (False, False, False),
        'vanilla-do':        (False, False, False),
        'bayes111':          (True, True, True),
        'bayes111-nofc':     (True, True, True),
        'bayes111-mutrunca': (True, True, True),
        'bayes110':          (True, True, False),
        'bayes100':          (True, False, False),
        'bayes100-nofc':     (True, False, False),
    }

    def __init__(self, cfg, device=None, n_classes=10, do=(), k=1., vae_list=None,
                 logvar=-10., **kwargs):
        """Build the network.

        :param cfg: architecture key, one of ``_BAYES_MASKS``.
        :param device: optional torch device the model is moved to.
        :param n_classes: size of the output layer.
        :param do: unused; kept for interface compatibility (immutable
            default replaces the original mutable ``[]``).
        :param k: width multiplier for the conv channel counts.
        :param vae_list: unused here; kept for interface compatibility.
        :param logvar: unused here (``weights_init`` takes its own
            ``logvar``); kept for interface compatibility.
        :raises NotImplementedError: for an unknown ``cfg``.
        """
        super(CIFARNetNew, self).__init__(**kwargs)
        self.device = device
        self.cfg = cfg
        self.vaes = []  # filled by set_prior / set_dwp_regularizer
        d1, d2, d3 = map(int, [128 * k, 256 * k, 512 * k])  # d3 kept for parity; unused
        if cfg not in self._BAYES_MASKS:
            raise NotImplementedError
        mask = self._BAYES_MASKS[cfg]
        bayes_conv = (bayes.MuTruncAlphaFFGConv2d if cfg.endswith('-mutrunca')
                      else bayes.BayesConv2d)

        def conv(stochastic, c_in, c_out, ksize):
            # One conv layer, stochastic or deterministic per the cfg mask.
            return (bayes_conv if stochastic else nn.Conv2d)(c_in, c_out, ksize)

        # Input: 3x32x32
        self.features = nn.Sequential(OrderedDict([
            ('conv1', conv(mask[0], 3, d1, 7)),   # d1 x 26 x 26
            ('relu1', nn.LeakyReLU()),
            ('maxpool', nn.MaxPool2d(2)),         # d1 x 13 x 13
            ('conv2', conv(mask[1], d1, d2, 5)),  # d2 x 9 x 9
            ('relu2', nn.LeakyReLU()),
            ('conv3', conv(mask[2], d2, d2, 5)),  # d2 x 5 x 5
            ('relu3', nn.LeakyReLU()),
            ('flatten', Flatten()),               # d2 * 25
        ]))
        # Head choice: 'nofc' wins over 'do' (checked first), matching the
        # original branch order.
        if 'nofc' in self.cfg:
            self.classifier = nn.Sequential(OrderedDict([
                ('linear', nn.Linear(d2 * 25, n_classes))
            ]))
            print('====> CIFARNetNew without FC!!!!!')
        elif 'do' in self.cfg:
            self.classifier = nn.Sequential(OrderedDict([
                ('do1', nn.Dropout(0.5)),
                ('fc1', nn.Linear(d2 * 25, 512)),
                ('relu1', nn.LeakyReLU()),
                ('do2', nn.Dropout(0.2)),
                ('linear', nn.Linear(512, n_classes))
            ]))
        else:
            self.classifier = nn.Sequential(OrderedDict([
                ('fc1', nn.Linear(d2 * 25, 512)),
                ('relu1', nn.LeakyReLU()),
                ('linear', nn.Linear(512, n_classes))
            ]))
        if self.device:
            self.to(self.device)

    def forward(self, input):
        """Run features then classifier; returns (batch, n_classes) scores."""
        return self.classifier(self.features(input))

    def weights_init(self, init_list, vae_list, flow_list=None, pretrained=None, filters_list=None, logvar=-10.):
        """Initialise conv/linear weights according to ``init_list``.

        ``init_list[i]`` selects the scheme for conv layer i: 'xavier'
        or 'no' (default behaviour), 'vae', 'flow', 'filters', 'recon',
        or a single 'pretrained' entry to copy a full checkpoint.
        A single ['no'] skips initialisation entirely.  Log-scale
        parameters of Bayesian layers are set to ``logvar``.
        """
        if len(init_list) == 1 and init_list[0] == 'no':
            return
        # Default init: xavier for weights/means, constant for log-scales.
        self.apply(utils.weight_init(module=nn.Conv2d, initf=nn.init.xavier_normal_))
        self.apply(utils.weight_init(module=nn.Linear, initf=nn.init.xavier_normal_))
        self.apply(utils.weight_init(module=bayes.LogScaleConv2d, initf=utils.const_init(logvar)))
        self.apply(utils.weight_init(module=bayes.LogScaleLinear, initf=utils.const_init(logvar)))
        if len(init_list) > 0 and init_list[0] == 'pretrained':
            assert len(init_list) == 1
            w_pretrained = torch.load(pretrained)
            for k, v in w_pretrained.items():
                if k in self.state_dict():
                    self.state_dict()[k].data.copy_(v)
                else:
                    # Bayesian layers store the weight under '<layer>.mean.weight'.
                    tokens = k.split('.')
                    self.state_dict()['.'.join(tokens[:2] + ['mean'] + tokens[-1:])].data.copy_(v)
            return
        convs = [self.features.conv1, self.features.conv2, self.features.conv3]
        for i, m in enumerate(convs):
            init = init_list[i] if i < len(init_list) else 'xavier'
            # For Bayesian layers initialise the variational mean.
            w = m.mean.weight if isinstance(m, bayes._Bayes) else m.weight
            if init == 'vae':
                vae_path = vae_list[i]
                vae = utils.load_vae(vae_path, device=self.device)
                z = torch.randn(w.size(0) * w.size(1), vae.encoder.z_dim, 1, 1).to(vae.device)
                x = vae.decode(z)[0]
                w.data = x.reshape(w.shape)
            elif init == 'flow':
                flow_path = flow_list[i]
                flow = utils.load_flow(flow_path, device=self.device)
                utils.flow_init(flow)(w)
            elif init == 'xavier' or init == 'no':
                pass  # already applied above / deliberately skipped
            elif init == 'filters':
                # Sample N = out*in filters (with replacement via tiling).
                filters = np.load(filters_list[i])
                filters = np.concatenate([filters] * 10)
                N = np.prod(w.shape[:2])
                filters = filters[np.random.permutation(len(filters))[:N]]
                w.data = torch.from_numpy(filters.reshape(*w.shape)).to(self.device)
            elif init == 'recon':
                # Like 'filters', but pass them through a VAE first.
                filters = np.load(filters_list[i])
                filters = np.concatenate([filters] * 10)
                N = np.prod(w.shape[:2])
                filters = filters[np.random.permutation(len(filters))[:N]]
                vae_path = vae_list[i]
                vae = utils.load_vae(vae_path, device=self.device)
                filters = vae(torch.from_numpy(filters).to(self.device))[1][0]
                w.data = filters.reshape_as(w)
            else:
                raise NotImplementedError('no {} init'.format(init))

    def set_prior(self, prior_list, dwp_samples, vae_list, flow_list=None):
        """Attach a prior / KL term to each Bayesian conv layer.

        ``prior_list[i]`` selects the prior for conv layer i: 'vae'
        (deep-weight prior; the VAE is also kept in ``self.vaes``),
        'flow', 'sn' (standard normal), 'loguniform' (only valid for
        'bayes111-mutrunca'), or 'no'.  Deterministic layers are skipped.
        """
        convs = [self.features.conv1, self.features.conv2, self.features.conv3]
        for i, m in enumerate(convs):
            if not isinstance(m, bayes._Bayes):
                continue
            if prior_list[i] == 'vae':
                vae = utils.load_vae(vae_list[i], self.device)
                vae = nn.DataParallel(vae)
                self.vaes.append(vae)
                # The prior is fixed; its parameters must not be trained.
                for p in vae.parameters():
                    p.requires_grad = False
                m.kl_function = utils.kl_dwp(vae, n_tries=dwp_samples)
            elif prior_list[i] == 'flow':
                flow = utils.load_flow(flow_list[i], self.device)
                for p in flow.parameters():
                    p.requires_grad = False
                m.kl_function = utils.kl_flow(flow, n_tries=dwp_samples)
            elif prior_list[i] == 'sn':
                m.kl_function = utils.kl_normal
                m.prior = dist.Normal(torch.FloatTensor([0.]).to(self.device),
                                      torch.FloatTensor([1.]).to(self.device))
            elif prior_list[i] == 'loguniform':
                if self.cfg in ['bayes111-mutrunca']:
                    m.kl_function = utils.kl_loguniform_with_trunc_alpha
                else:
                    raise NotImplementedError
            elif prior_list[i] == 'no':
                pass
            else:
                raise NotImplementedError

    def set_dwp_regularizer(self, vae_list):
        """Load one frozen VAE per conv layer into ``self.vaes`` for use
        by ``get_dwp_reg``."""
        for path in vae_list:
            vae = utils.load_vae(path, device=self.device)
            for p in vae.parameters():
                p.requires_grad = False
            self.vaes.append(vae)

    def get_dwp_reg(self, backward=False, n_tries=1, weight=1., target='elbo'):
        """Sum the deep-weight-prior regulariser over the conv layers,
        pairing each conv with the corresponding VAE in ``self.vaes``."""
        modules = [self.features.conv1, self.features.conv2, self.features.conv3]
        reg = 0.
        for m, vae in zip(modules, self.vaes):
            reg += utils.dwp_regularizer(vae, m, n_tries=n_tries, backward=backward, weight=weight, target=target)
        return reg
| 41.384615
| 114
| 0.489709
| 2,312
| 22,058
| 4.586505
| 0.083045
| 0.056017
| 0.039042
| 0.027725
| 0.917201
| 0.912109
| 0.909751
| 0.909751
| 0.887778
| 0.867314
| 0
| 0.082745
| 0.357331
| 22,058
| 532
| 115
| 41.462406
| 0.665279
| 0.033684
| 0
| 0.858447
| 0
| 0
| 0.062674
| 0
| 0
| 0
| 0
| 0
| 0.004566
| 1
| 0.025114
| false
| 0.009132
| 0.015982
| 0.006849
| 0.063927
| 0.002283
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b68b33186122cb9074f12603b9c49710c042b8dc
| 21,100
|
py
|
Python
|
memsource_cli/api/machine_translation_settings_api.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
memsource_cli/api/machine_translation_settings_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
memsource_cli/api/machine_translation_settings_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from memsource_cli.api_client import ApiClient
class MachineTranslationSettingsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_list(self, **kwargs): # noqa: E501
"""List machine translate settings # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: Page number, starting with 0, default 0
:param int page_size: Page size, accepts values between 1 and 50, default 50
:return: PageDtoMachineTranslateSettingsPbmDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_list_with_http_info(**kwargs) # noqa: E501
return data
def get_list_with_http_info(self, **kwargs):  # noqa: E501
    """List machine translate settings  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_list_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int page_number: Page number, starting with 0, default 0
    :param int page_size: Page size, accepts values between 1 and 50, default 50
    :return: PageDtoMachineTranslateSettingsPbmDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control kwargs that
    # ApiClient.call_api understands.
    all_params = ['page_number', 'page_size']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: generated code; the locals() snapshot lets named args and
    # **kwargs be handled uniformly below — do not rename locals here.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side range validation: 0 <= pageNumber, 1 <= pageSize <= 50.
    if 'page_number' in params and params['page_number'] < 0:  # noqa: E501
        raise ValueError("Invalid value for parameter `page_number` when calling `get_list`, must be a value greater than or equal to `0`")  # noqa: E501
    if 'page_size' in params and params['page_size'] > 50:  # noqa: E501
        raise ValueError("Invalid value for parameter `page_size` when calling `get_list`, must be a value less than or equal to `50`")  # noqa: E501
    if 'page_size' in params and params['page_size'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `page_size` when calling `get_list`, must be a value greater than or equal to `1`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Only forward paging params the caller actually supplied.
    query_params = []
    if 'page_number' in params:
        query_params.append(('pageNumber', params['page_number']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api2/v1/machineTranslateSettings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDtoMachineTranslateSettingsPbmDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_machine_translate_settings_for_project_template(self, project_template_id, **kwargs): # noqa: E501
"""Get machine translate settings # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_machine_translate_settings_for_project_template(project_template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str project_template_id: (required)
:return: MTSettingsPerLanguageListDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_machine_translate_settings_for_project_template_with_http_info(project_template_id, **kwargs) # noqa: E501
else:
(data) = self.get_machine_translate_settings_for_project_template_with_http_info(project_template_id, **kwargs) # noqa: E501
return data
def get_machine_translate_settings_for_project_template_with_http_info(self, project_template_id, **kwargs):  # noqa: E501
    """Get machine translate settings  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_machine_translate_settings_for_project_template_with_http_info(project_template_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str project_template_id: (required)
    :return: MTSettingsPerLanguageListDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control kwargs that
    # ApiClient.call_api understands.
    all_params = ['project_template_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: generated code; the locals() snapshot lets named args and
    # **kwargs be handled uniformly below — do not rename locals here.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_machine_translate_settings_for_project_template" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_template_id' is set
    if ('project_template_id' not in params or
            params['project_template_id'] is None):
        raise ValueError("Missing the required parameter `project_template_id` when calling `get_machine_translate_settings_for_project_template`")  # noqa: E501

    collection_formats = {}

    # The template id is substituted into the URL path below.
    path_params = {}
    if 'project_template_id' in params:
        path_params['projectTemplateId'] = params['project_template_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api2/v1/projectTemplates/{projectTemplateId}/mtSettings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MTSettingsPerLanguageListDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_mt_settings(self, id, **kwargs): # noqa: E501
"""Get machine translate settings # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_mt_settings(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: MachineTranslateSettingsPbmDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_mt_settings_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_mt_settings_with_http_info(id, **kwargs) # noqa: E501
return data
def get_mt_settings_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get machine translate settings  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_mt_settings_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: MachineTranslateSettingsPbmDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control kwargs that
    # ApiClient.call_api understands.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: generated code; the locals() snapshot lets named args and
    # **kwargs be handled uniformly below — do not rename locals here.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_mt_settings" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_mt_settings`")  # noqa: E501

    collection_formats = {}

    # The settings id is substituted into the URL path below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api2/v1/machineTranslateSettings/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MachineTranslateSettingsPbmDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_status(self, id, **kwargs): # noqa: E501
"""Get status of machine translate engine # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_status(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: MachineTranslateStatusDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_status_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_status_with_http_info(id, **kwargs) # noqa: E501
return data
def get_status_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get status of machine translate engine  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_status_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: MachineTranslateStatusDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control kwargs that
    # ApiClient.call_api understands.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: generated code; the locals() snapshot lets named args and
    # **kwargs be handled uniformly below — do not rename locals here.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_status`")  # noqa: E501

    collection_formats = {}

    # The engine id is substituted into the URL path below.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api2/v1/machineTranslateSettings/{id}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MachineTranslateStatusDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_translation_resources(self, project_uid, job_uid, **kwargs): # noqa: E501
"""Get translation resources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_translation_resources(project_uid, job_uid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str project_uid: (required)
:param str job_uid: (required)
:return: TranslationResourcesDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_translation_resources_with_http_info(project_uid, job_uid, **kwargs) # noqa: E501
else:
(data) = self.get_translation_resources_with_http_info(project_uid, job_uid, **kwargs) # noqa: E501
return data
def get_translation_resources_with_http_info(self, project_uid, job_uid, **kwargs):  # noqa: E501
    """Get translation resources  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_translation_resources_with_http_info(project_uid, job_uid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str project_uid: (required)
    :param str job_uid: (required)
    :return: TranslationResourcesDto
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['project_uid', 'job_uid',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Fold **kwargs into params, rejecting anything not in the known set.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_translation_resources" % key
            )
        params[key] = val
    del params['kwargs']

    # Both path parameters are mandatory.
    for required in ('project_uid', 'job_uid'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `get_translation_resources`" % required)  # noqa: E501

    path_params = {}
    if 'project_uid' in params:
        path_params['projectUid'] = params['project_uid']  # noqa: E501
    if 'job_uid' in params:
        path_params['jobUid'] = params['job_uid']  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    # No authentication configured for this endpoint.
    return self.api_client.call_api(
        '/api2/v1/projects/{projectUid}/jobs/{jobUid}/translationResources', 'GET',
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='TranslationResourcesDto',  # noqa: E501
        auth_settings=[],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 39.962121
| 421
| 0.619052
| 2,427
| 21,100
| 5.123197
| 0.089823
| 0.052115
| 0.022519
| 0.028953
| 0.885636
| 0.866334
| 0.849847
| 0.828374
| 0.809635
| 0.784864
| 0
| 0.019336
| 0.291659
| 21,100
| 527
| 422
| 40.037951
| 0.812592
| 0.323365
| 0
| 0.715302
| 0
| 0.010676
| 0.216524
| 0.069342
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039146
| false
| 0
| 0.014235
| 0
| 0.11032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b69a1ac9e4bd19cae9c76c9d055daa994f7badc3
| 1,954
|
py
|
Python
|
source/00/logo.py
|
schef/schef.github.io
|
ac6fc70e5077deeeb8233ede89e0895fdc2a0d05
|
[
"MIT"
] | null | null | null |
source/00/logo.py
|
schef/schef.github.io
|
ac6fc70e5077deeeb8233ede89e0895fdc2a0d05
|
[
"MIT"
] | null | null | null |
source/00/logo.py
|
schef/schef.github.io
|
ac6fc70e5077deeeb8233ede89e0895fdc2a0d05
|
[
"MIT"
] | null | null | null |
def print_logo():
print(" dP dP ")
print(" 88 88 ")
print(" .d8888b. 88d888b. .d8888b. .d8888b. 88 dP dP d8888P .d8888b. ")
print(" 88' `88 88' `88 Y8ooooo. 88' `88 88 88 88 88 88ooood8 ")
print(" 88. .88 88. .88 88 88. .88 88 88. .88 88 88. ... ")
print(" `88888P8 88Y888P' `88888P' `88888P' dP `88888P' dP `88888P' ")
print(" 88 ")
print(" dP ")
print(" oo dP dP ")
print(" 88 88 ")
print(" 88d888b. dP d8888P .d8888b. 88d888b. ")
print(" 88' `88 88 88 88' `"" 88' `88 ")
print(" 88. .88 88 88 88. ... 88 88 ")
print(" 88Y888P' dP dP `88888P' dP dP ")
print(" 88 ")
print(" dP ")
print(" dP oo oo ")
print(" 88 ")
print(" d8888P 88d888b. .d8888b. dP 88d888b. dP 88d888b. .d8888b. ")
print(" 88 88' `88 88' `88 88 88' `88 88 88' `88 88' `88 ")
print(" 88 88 88. .88 88 88 88 88 88 88 88. .88 ")
print(" dP dP `88888P8 dP dP dP dP dP dP `8888P88 ")
print(" .88 ")
print(" d8888P ")
| 75.153846
| 85
| 0.265609
| 155
| 1,954
| 3.341935
| 0.096774
| 0.432432
| 0.544402
| 0.617761
| 0.511583
| 0.415058
| 0.274131
| 0.274131
| 0.274131
| 0.254826
| 0
| 0.386591
| 0.641249
| 1,954
| 25
| 86
| 78.16
| 0.352354
| 0
| 0
| 0.28
| 0
| 0.08
| 0.81781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| true
| 0
| 0
| 0
| 0.04
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
fcb12093f0403df0fd625ae693b03a886413fd2c
| 2,524
|
py
|
Python
|
frontstage_api/controllers/collection_exercise_controller.py
|
ONSdigital/ras-frontstage-api
|
7bb32a85868e2a241b8a0331b884155a36450669
|
[
"MIT"
] | 2
|
2018-03-05T11:58:51.000Z
|
2018-03-06T12:33:59.000Z
|
frontstage_api/controllers/collection_exercise_controller.py
|
ONSdigital/ras-frontstage-api
|
7bb32a85868e2a241b8a0331b884155a36450669
|
[
"MIT"
] | 34
|
2017-10-17T10:50:18.000Z
|
2018-07-31T09:04:40.000Z
|
frontstage_api/controllers/collection_exercise_controller.py
|
ONSdigital/ras-frontstage-api
|
7bb32a85868e2a241b8a0331b884155a36450669
|
[
"MIT"
] | 1
|
2021-04-11T08:14:40.000Z
|
2021-04-11T08:14:40.000Z
|
import logging
from structlog import wrap_logger
from frontstage_api import app
from frontstage_api.common.request_handler import request_handler
from frontstage_api.exceptions.exceptions import ApiError
logger = wrap_logger(logging.getLogger(__name__))
def get_collection_exercise(collection_exercise_id):
    """Fetch one collection exercise from the RM collection exercise service.

    Raises ApiError on any non-200 response; returns the decoded JSON body.
    """
    logger.debug('Retrieving collection exercise', collection_exercise_id=collection_exercise_id)
    collex_svc = app.config['RM_COLLECTION_EXERCISE_SERVICE']
    url = f"{collex_svc}/collectionexercises/{collection_exercise_id}"
    response = request_handler('GET', url, auth=app.config['BASIC_AUTH'])

    if response.status_code == 200:
        logger.debug('Successfully retrieved collection exercise', collection_exercise_id=collection_exercise_id)
        return response.json()

    raise ApiError(url=url, status_code=response.status_code,
                   description='Failed to retrieve collection exercise',
                   collection_exercise_id=collection_exercise_id)
def get_collection_exercise_events(collection_exercise_id):
    """Fetch the events for a collection exercise from the RM service.

    Raises ApiError on any non-200 response; returns the decoded JSON body.
    """
    logger.debug('Retrieving collection exercise events', collection_exercise_id=collection_exercise_id)
    collex_svc = app.config['RM_COLLECTION_EXERCISE_SERVICE']
    url = f"{collex_svc}/collectionexercises/{collection_exercise_id}/events"
    response = request_handler('GET', url, auth=app.config['BASIC_AUTH'])

    if response.status_code == 200:
        logger.debug('Successfully retrieved collection exercise events', collection_exercise_id=collection_exercise_id)
        return response.json()

    raise ApiError(url=url, status_code=response.status_code,
                   description='Failed to retrieve collection exercise events',
                   collection_exercise_id=collection_exercise_id)
def get_collection_exercise_event(collection_exercise_id, tag):
    """Fetch a single tagged event of a collection exercise from the RM service.

    Raises ApiError on any non-200 response; returns the decoded JSON body.
    """
    logger.debug('Retrieving collection exercise event', collection_exercise_id=collection_exercise_id, tag=tag)
    collex_svc = app.config['RM_COLLECTION_EXERCISE_SERVICE']
    url = f"{collex_svc}/collectionexercises/{collection_exercise_id}/events/{tag}"
    response = request_handler('GET', url, auth=app.config['BASIC_AUTH'])

    if response.status_code == 200:
        logger.debug('Successfully retrieved collection exercise event', collection_exercise_id=collection_exercise_id, tag=tag)
        return response.json()

    raise ApiError(url=url, status_code=response.status_code,
                   description='Failed to retrieve collection exercise event',
                   collection_exercise_id=collection_exercise_id, tag=tag)
| 45.890909
| 124
| 0.763867
| 295
| 2,524
| 6.227119
| 0.162712
| 0.382145
| 0.261296
| 0.146979
| 0.868263
| 0.847033
| 0.812738
| 0.812738
| 0.758302
| 0.738704
| 0
| 0.004208
| 0.152536
| 2,524
| 54
| 125
| 46.740741
| 0.854605
| 0
| 0
| 0.388889
| 0
| 0
| 0.277734
| 0.116086
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.138889
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fcbc7a20d39af03d92f63f4afee57967173ff4a4
| 13,428
|
py
|
Python
|
bootstrap_create_figures.py
|
Warvito/Normative-modelling-using-deep-autoencoders
|
54972ca7b503f023438dde1d08b5cfdbdc5a84a0
|
[
"MIT"
] | 12
|
2020-02-10T10:12:09.000Z
|
2022-02-20T11:45:01.000Z
|
bootstrap_create_figures.py
|
Warvito/Normative-modelling-using-deep-autoencoders
|
54972ca7b503f023438dde1d08b5cfdbdc5a84a0
|
[
"MIT"
] | 2
|
2021-11-10T19:40:21.000Z
|
2022-02-09T23:34:33.000Z
|
bootstrap_create_figures.py
|
Warvito/Normative-modelling-using-deep-autoencoders
|
54972ca7b503f023438dde1d08b5cfdbdc5a84a0
|
[
"MIT"
] | 4
|
2020-08-31T04:52:10.000Z
|
2021-07-06T11:17:11.000Z
|
#!/usr/bin/env python3
"""
Script to create Figure 2 of the paper.
"""
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from utils import load_dataset
PROJECT_ROOT = Path.cwd()
def main():
    """Create elements for figure 2 of the paper.

    For each clinical dataset, aggregate the bootstrapped mean reconstruction
    errors of the supervised adversarial autoencoder per diagnostic group,
    plot the mean with its 95% bootstrap interval, and write the summary
    statistics to `<bootstrap_dir>/<dataset>/deviations.csv`.

    Fixes over the original: the five near-identical per-dataset blocks are
    collapsed into one helper; `pd.DataFrame(columns={...})` used a *set*
    literal (nondeterministic column order in the CSV); `DataFrame.append`
    is deprecated and removed in pandas 2.0.
    """
    # ----------------------------------------------------------------------------
    n_bootstrap = 1000
    model_name = 'supervised_aae'

    outputs_dir = PROJECT_ROOT / 'outputs'
    bootstrap_dir = outputs_dir / 'bootstrap_analysis'
    model_dir = bootstrap_dir / model_name

    # (dataset name, diagnosis codes in plotting order, matching column labels).
    # Codes taken from the original per-dataset blocks: 1=HC, 27=EMCI,
    # 28=LMCI, 18=MCI, 17=AD -- TODO confirm against dataset documentation.
    datasets = [
        ('ADNI', [1, 27, 28, 17], ['HC', 'EMCI', 'LMCI', 'AD']),
        ('AIBL', [1, 18, 17], ['HC', 'MCI', 'AD']),
        ('TOMC', [1, 18, 17], ['HC', 'MCI', 'AD']),
        ('OASIS1', [1, 17], ['HC', 'AD']),
        ('MIRIAD', [1, 17], ['HC', 'AD']),
    ]
    for dataset_name, diagn_codes, group_labels in datasets:
        _analyse_dataset(dataset_name, diagn_codes, group_labels,
                         n_bootstrap, model_dir, bootstrap_dir)


def _analyse_dataset(dataset_name, diagn_codes, group_labels,
                     n_bootstrap, model_dir, bootstrap_dir):
    """Plot bootstrap mean reconstruction errors for one dataset and save stats.

    :param dataset_name: folder name of the dataset under data/ and outputs/.
    :param diagn_codes: 'Diagn' codes selecting each group, in plotting order.
    :param group_labels: CSV column label for each code (same order).
    :param n_bootstrap: number of bootstrap repetitions to aggregate.
    :param model_dir: directory containing the per-bootstrap model outputs.
    :param bootstrap_dir: root directory for plots and summary CSVs.
    """
    participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
    freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
    ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
    dataset_df = load_dataset(participants_path, ids_path, freesurfer_path)

    mean_list = []
    for i_bootstrap in tqdm(range(n_bootstrap)):
        bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
        output_dataset_dir = bootstrap_model_dir / dataset_name
        output_dataset_dir.mkdir(exist_ok=True)

        reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
        # Mean reconstruction error per diagnostic group for this bootstrap sample.
        mean_list.append([
            reconstruction_error_df.loc[dataset_df['Diagn'] == code]['Reconstruction error'].mean()
            for code in diagn_codes
        ])
    mean_arr = np.array(mean_list)

    n_groups = len(diagn_codes)
    means = np.mean(mean_arr, axis=0)
    lower = np.percentile(mean_arr, 2.5, axis=0)
    upper = np.percentile(mean_arr, 97.5, axis=0)

    # Mean (square marker) with 95% bootstrap interval (horizontal line) per group.
    plt.hlines(range(n_groups), lower, upper)
    plt.plot(means, range(n_groups), 's', color='k')
    plt.savefig(bootstrap_dir / (dataset_name + '.eps'), format='eps')
    plt.close()
    plt.clf()

    # Summary table with a deterministic column order.
    rows = []
    for measure, values in (('Mean', means), ('Lower', lower), ('Upper', upper)):
        row = {'Measure': measure}
        row.update(zip(group_labels, values))
        rows.append(row)
    results = pd.DataFrame(rows, columns=['Measure'] + list(group_labels))
    results.to_csv(bootstrap_dir / dataset_name / 'deviations.csv', index=False)
# Script entry point: generate all bootstrap deviation figures and tables.
if __name__ == "__main__":
    main()
| 51.448276
| 108
| 0.60076
| 1,678
| 13,428
| 4.530393
| 0.06913
| 0.03749
| 0.079979
| 0.024993
| 0.931465
| 0.922783
| 0.912786
| 0.88503
| 0.878321
| 0.867009
| 0
| 0.026839
| 0.228627
| 13,428
| 260
| 109
| 51.646154
| 0.707086
| 0.042076
| 0
| 0.744898
| 0
| 0
| 0.109078
| 0.009343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005102
| false
| 0
| 0.030612
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e2200a230b5fa1ee0c0616792bdc35f92d07455
| 282
|
py
|
Python
|
tests/test_get_stable_release_version_number.py
|
kant/blender-downloader
|
0acaec48d384a8951056d463a8939167e30ea1d4
|
[
"BSD-3-Clause"
] | 1
|
2021-02-14T00:49:15.000Z
|
2021-02-14T00:49:15.000Z
|
tests/test_get_stable_release_version_number.py
|
kant/blender-downloader
|
0acaec48d384a8951056d463a8939167e30ea1d4
|
[
"BSD-3-Clause"
] | 22
|
2021-02-13T20:51:33.000Z
|
2022-01-11T17:24:39.000Z
|
tests/test_get_stable_release_version_number.py
|
kant/blender-downloader
|
0acaec48d384a8951056d463a8939167e30ea1d4
|
[
"BSD-3-Clause"
] | 2
|
2021-06-22T09:38:29.000Z
|
2022-01-01T21:37:02.000Z
|
"""Test that the stable version number can be retrieved from Blender website."""
import re
from blender_downloader import get_stable_release_version_number
def test_get_stable_release_version_number():
    """The version string scraped from Blender's site looks like `X.Y.Z`."""
    version = get_stable_release_version_number()
    assert re.match(r"^\d+\.\d+\.\d+", version)
| 28.2
| 80
| 0.787234
| 42
| 282
| 4.952381
| 0.52381
| 0.25
| 0.230769
| 0.331731
| 0.418269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109929
| 282
| 9
| 81
| 31.333333
| 0.828685
| 0.262411
| 0
| 0
| 0
| 0
| 0.069307
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
1e66869116b61d2c59c52e1ffb6ad8d78d9154d9
| 51,734
|
py
|
Python
|
20210803/main.py
|
Brook1711/openda1
|
1d67912083ecf60b04daa6d9cf377339d179b1aa
|
[
"Apache-2.0"
] | null | null | null |
20210803/main.py
|
Brook1711/openda1
|
1d67912083ecf60b04daa6d9cf377339d179b1aa
|
[
"Apache-2.0"
] | null | null | null |
20210803/main.py
|
Brook1711/openda1
|
1d67912083ecf60b04daa6d9cf377339d179b1aa
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import json
import numpy as np
import ast
from datetime import datetime
import plotly.graph_objs as go
from plotly.offline import plot
import plotly.offline as offline
from pandas.core.indexes import interval
import re
from pathlib import Path
import os
class data_analysis:
def __init__(self, df, name = 'default') -> None:
self.accuracy_list, self.addition_list =0, 0
self.name = name
self.with_successrate = [0, 1]
self.df = df
self.problem_num = len(ast.literal_eval(self.df.loc[0, 'task_answers']))
self.row_num = len(self.df)
self.df.insert(len(self.df.columns), 'ans', self.remove_str())
self.df.insert(len(self.df.columns), 'interval', self.get_interval()[0])
self.df.insert(len(self.df.columns), 'day', self.get_interval()[1])
self.df.insert(len(self.df.columns), 'start_hour', self.get_interval()[2])
self.df.insert(len(self.df.columns), 'end_hour', self.get_interval()[3])
self.df.insert(len(self.df.columns), 'start_time_float', self.get_interval()[4])
if -1 in self.df.sort_values(by='start_hour', ascending=True).groupby('start_hour').groups.keys():
self.df = self.df.drop(list(self.df.sort_values(by='start_hour', ascending=True).groupby('start_hour').groups[-1]))
self.df = self.df.reset_index(drop=True)
self.row_num = len(self.df)
self.ndf = pd.DataFrame(self.create_new_df())
self.ndf_list = self.divide_ndf()
self.group_list = self.group_by()
self.count_df_list = self.count_group()
# self.addition_list, self.success_df,self.problem_num_list = self.get_addition()
# self.output_df = 0
# self.output()
print('init complete')
def remove_time_error(self):
df_rm_time_err = 0
return df_rm_time_err
def remove_str_per_row(self, data_per_row):
frame_list = ast.literal_eval(data_per_row)
frame_dic_list = []
for index in range(len(frame_list)):
if frame_list[index] == '':
frame_dic_list.append({'frame':'0'})
else:
frame_dic_list.append(json.loads(frame_list[index]))
return frame_dic_list
def remove_str(self):
ndf_ans_8_list = []
ndf_rm_frame = []
for i in range(self.row_num):
dic_temp = self.remove_str_per_row(self.df.loc[i,'task_answers'])
ndf_ans_8_list.append(dic_temp)
new_dic_list = []
for dic in dic_temp:
dic = dic['frame']
new_dic = dic
new_dic_list.append(new_dic)
ndf_rm_frame.append(new_dic_list)
return ndf_rm_frame
def get_interval(self):
interval_list = []
day_list = []
start_hour_list = []
stop_hour_list = []
start_time_list = []
for i in range(len(self.df)):
interval_list.append(self.get_interval_per_row(i)[0])
day_list.append(self.get_interval_per_row(i)[1])
start_hour_list.append(self.get_interval_per_row(i)[2])
stop_hour_list.append(self.get_interval_per_row(i)[3])
start_time_list.append(self.get_interval_per_row(i)[4])
return [interval_list, day_list, start_hour_list, stop_hour_list, start_time_list]
def get_interval_per_row(self, index):
row_data = self.df.loc[index,:]
start_time = row_data['start_time']
if start_time != start_time:
return -1, -1, -1, -1, -1
start_time = datetime.strptime(start_time,"%Y-%m-%dT%H:%M:%S+08:00")
expire_time = row_data['expire_time']
expire_time = datetime.strptime(expire_time,"%Y-%m-%dT%H:%M:%S+08:00")
stop_time = row_data['stop_time']
if stop_time != stop_time:
return -1, -1, -1, -1, -1
stop_time = datetime.strptime(stop_time,"%Y-%m-%dT%H:%M:%S+08:00")
total_sec = (stop_time - start_time).seconds
return [total_sec, str(start_time.month)+str(start_time.day), start_time.hour, stop_time.hour, start_time.hour+start_time.minute/60.0]
def create_new_df(self):
twoD_list = []
for row in range(self.row_num):
ans_dic_list = self.df.loc[row, 'ans']
twoD_list.append(ans_dic_list)
return twoD_list
def divide_ndf(self):
ndf_list = []
for i in range(len(self.ndf.columns)):
ndf_list.append(pd.DataFrame(self.ndf.loc[:,i]))
return ndf_list
def group_by_per_problem(self, index):
    """Group the answers of problem *index* by their canonical string form.

    NOTE(review): this MUTATES self.ndf_list[index] in place by inserting
    an 'ans_str' column at position 1; calling it twice for the same index
    raises, since pandas refuses to insert a duplicate column.
    Returns a pandas GroupBy keyed on 'ans_str'.
    """
    df_temp = self.ndf_list[index]
    df_str_list = []
    for j in range(len(df_temp)):
        ndf_index_j = df_temp.iloc[j, 0]
        # Missing answers are represented by the literal string 'None'.
        if ndf_index_j == None:
            df_str_list.append(str(None))
        else:
            df_str_list.append(self.content_to_str(ndf_index_j))
    df_temp.insert(1, 'ans_str', df_str_list)
    df_per_problom = df_temp.groupby('ans_str')
    return df_per_problom
def content_to_str(self, data):
if data == None or type(data) == str:
return str(None)
elif type(data) == type([]):
return self.data_to_str(data)
elif 'data' in data.keys():
return self.data_to_str(data['data'])
else:
return self.data_to_str(data)
def data_to_str(self, data):
if type(data) == type({}):
return str(list(data.values()))
else:
return str(data)
def main_df_process(self):
return 0
def group_by(self):
group_list = []
for i in range(self.problem_num):
df_temp = self.group_by_per_problem(i)
group_list.append(df_temp)
return group_list
def count_group(self):
count_df_list = []
for group in self.group_list:
count_df_list.append(group.count())
return count_df_list
def plot(self):
    """Plot a histogram of per-student time spent and save it under ./plot/<name>/.

    Titles and axis labels are user-facing Chinese runtime strings and are
    deliberately left untouched.  Always returns 0.
    """
    data = [go.Histogram(x=list(self.df.loc[:,'interval']))]
    layout={"title": "学生用时分布",
    "xaxis_title": "学生用时,单位秒",
    "yaxis_title": "学生个数",
    # tilt the x-axis tick labels by 60 degrees
    "xaxis": {"tickangle": 60}
    }
    fig = go.Figure(data=data,layout=layout)
    plot(fig,filename="./plot/"+self.name+"/time.html",auto_open=False,image='png',image_height=800,image_width=1500)
    # offline.iplot(fig)
    return 0
def plot_problem(self):
    """Plot a bar chart of the number of answer codes per problem and save it.

    Titles and axis labels are user-facing Chinese runtime strings and are
    deliberately left untouched.  Always returns 0.
    """
    data = [go.Bar(x = list(range(self.problem_num)), y = [len(list(group)) for group in self.group_list])]
    layout={"title": "不同题目的编码数量",
    "xaxis_title": "题目编号",
    "yaxis_title": "编码个数",
    # tilt the x-axis tick labels by 60 degrees
    "xaxis": {"tickangle": 60}
    }
    fig = go.Figure(data=data,layout=layout)
    plot(fig,filename="./plot/"+self.name+"/plot_problem.html",auto_open=False,image='png',image_height=800,image_width=1500)
    # offline.iplot(fig)
    return 0
def output(self):
    """Write the dataframe (with derived columns) to an Excel workbook
    under ./output/added_columns/<name>/.
    """
    tar_path = './output/added_columns/' + self.name
    os.makedirs(tar_path, exist_ok=True)
    self.df.to_excel(tar_path + '/data_added_columns.xlsx')
def calculate_acc(self):
    """Compute a per-problem accuracy value from the grouped answer counts.

    For each count DataFrame in ``self.count_df_list`` (one per problem,
    indexed by a stringified answer payload) this method:
      1. rebuilds the literal answer payload from the group index,
      2. drops rows whose payload has the wrong shape for that problem,
      3. (for some problems) canonicalizes the payload and re-groups,
      4. adds ``ratio`` (percent of students) and ``success`` columns,
      5. derives an accuracy figure from the ``success`` grouping.

    Returns:
        tuple[list, list]: ``(accuracy_list, addition_list)`` — per-problem
        accuracies and annotated DataFrames; also stored on ``self``.

    Side effects: prints progress, and writes one ``<i>_acc.xlsx`` per
    processed problem under ``./output/acc/<self.name>_acc``.

    NOTE(review): problems 4, 20 and 21 are intentionally skipped at the
    output stage, but they also match no branch, so ``accuracy_list``
    indices do NOT line up with problem indices past 4 — confirm callers
    account for that.
    """
    accuracy_list = []
    addition_list = []
    for i, df in enumerate(self.count_df_list):
        print(i)
        # Rebuild the literal answer payload from the stringified group index.
        additional_infor_df = pd.DataFrame({'list':[ast.literal_eval(index) for index in df.index]})
        additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( df.iloc[:, 0]))
        # additional_infor_df.to_excel('./output/all'+'/' +str(i) + '_count.xlsx')
        if i in [0, 1]:
            # Problems 0-1: payload must be a 4-element sequence. The
            # 'success' column is str(payload[0]) itself (expected '1' when
            # correct), not a recomputed flag.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) != 4:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['0' if l==None else str(l[0]) for l in additional_infor_df.iloc[:,0] ])
            # Shared accuracy derivation (repeated verbatim in every branch):
            # two success groups -> share of the '1' group; one group ->
            # all-or-nothing; no groups -> 0.0. groupby sorts keys, so with
            # two groups index 1 is normally '1'.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [2,3]:
            # Problems 2-3: payload is a non-empty list of strings; correct
            # iff the first entry equals '00'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != str:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)!=0 and l[0]=='00' else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [5]:
            # Problem 5: two string steps whose concatenation must be 'B_AC_A'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != str:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==2 and l[0]+l[1]=='B_AC_A' else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [6]:
            # Problem 6: five string steps whose concatenation must be
            # 'B_AC_AG_FD_BE_B'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != str:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==5 and l[0]+l[1]+l[2]+l[3]+l[4]=='B_AC_AG_FD_BE_B' else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [7]:
            # Problem 7: four sub-lists of edge triples; correct iff all
            # four expected triples are present.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) != 4:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != list or type(additional_infor_df.iloc[j,0][1]) != list or type(additional_infor_df.iloc[j,0][2]) != list or type(additional_infor_df.iloc[j,0][3]) != list:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            # NOTE(review): the final term below is missing an `in l` — a
            # non-empty list literal is always truthy, so the fourth triple
            # is never actually checked. Likely a bug; confirm intent before
            # fixing, as it loosens the success criterion.
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==4 and [[0,1],[0,2],[1,2]] in l and [[0,4],[0,5],[1,5]] in l and [[0,6],[0,7],[1,7]] in l and [[0,10],[0,11],[1,11]] else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [8] :
            # Problem 8: five sub-lists; correct iff every reference group
            # in verify_list is present in the payload.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) != 5:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != list or type(additional_infor_df.iloc[j,0][1]) != list or type(additional_infor_df.iloc[j,0][2]) != list or type(additional_infor_df.iloc[j,0][3]) != list or type(additional_infor_df.iloc[j,0][4]) != list:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( additional_infor_df.loc[:, 'count'])])
            verify_list = [[[1, 0], [2, 0], [3, 0], [3, 1]], [[0, 1], [1, 1], [2, 1], [2, 2]], [[3, 2], [4, 2], [5, 2], [5, 3]], [[2, 3], [3, 3], [4, 3], [4, 4]], [[0, 4], [1, 4], [2, 4], [2, 5]]]
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==5 and verify_list[0] in l and verify_list[1] in l and verify_list[2] in l and verify_list[3] in l and verify_list[4] in l else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [9]:
            # Problem 9: pairs of 'XX_YY' strings. Each pair member and the
            # pair itself are put in a canonical order first, equivalent
            # answers are merged by a re-group, then success means the
            # canonical pair is one of three accepted pairings.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != str:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row,'list']
                if list_temp!=None and len(list_temp)==2:
                    # Order each 'XX_YY' endpoint pair, then order the two pairs.
                    if list_temp[0][0:2] > list_temp[0][-2:]:
                        list_temp[0] = list_temp[0][-2:] + '_' + list_temp[0][0:2]
                    if list_temp[1][0:2] > list_temp[1][-2:]:
                        list_temp[1] = list_temp[1][-2:] + '_' + list_temp[1][0:2]
                    if list_temp[0] > list_temp[1]:
                        additional_infor_df._set_value(row,'list', str([list_temp[1], list_temp[0]]))
                    else:
                        additional_infor_df._set_value(row,'list', str(list_temp))
                else:
                    additional_infor_df._set_value(row,'list', str(list_temp))
            # Re-group: canonicalization may have merged distinct raw answers.
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[ast.literal_eval(index) for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            verify_list = [['02_09', '05_06'],['02_05', '06_09'],['02_06', '05_09']]
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==2 and l in verify_list else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [10]:
            # Problem 10: each answer element carries two numbers; they are
            # normalized to sorted 'a_b' strings, re-grouped, then success
            # means three pairs drawn without repetition from verify_list.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != str:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    # Extract the numbers of each element, sort within and
                    # across elements, then rebuild canonical 'a_b' strings.
                    list_temp = [[int(rebuild_i) for rebuild_i in re.findall(r"\d+", rebuild)] for rebuild in list_temp]
                    for list_mem in list_temp:
                        list_mem.sort()
                    list_temp.sort()
                    additional_infor_df._set_value(row,'list', str([str(list_str[0]) + '_' + str(list_str[1]) for list_str in list_temp]))
                else:
                    additional_infor_df._set_value(row,'list', str(list_temp))
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[ast.literal_eval(index) for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            verify_list = ['2','6','12','14','15','16']
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            # Success: exactly 3 pairs, all 6 endpoints distinct, and every
            # endpoint drawn from verify_list.
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l!= None and len(l)==3 and len(set([re.findall(r"\d+",l[0])[0], re.findall(r"\d+",l[0])[1], re.findall(r"\d+",l[1])[0], re.findall(r"\d+",l[1])[1], re.findall(r"\d+",l[2])[0],re.findall(r"\d+",l[2])[1]]))==6 and re.findall(r"\d+",l[0])[0] in verify_list and re.findall(r"\d+",l[0])[1] in verify_list and re.findall(r"\d+",l[1])[0] in verify_list and re.findall(r"\d+",l[1])[1] in verify_list and re.findall(r"\d+",l[2])[0] in verify_list and re.findall(r"\d+",l[2])[1] in verify_list else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [11]:
            # Problem 11: nested list of ints; flattened to a digit string
            # and compared against the expected key '0012210224'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0]) == 0:
                    drop_index_list.append(j)
                elif None in additional_infor_df.iloc[j,0][0]:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '0012210224'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [12]:
            # Problem 12: same flatten-to-digits scheme as problem 11 with
            # expected key '2213110425'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) == 0:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0]) == 0:
                    drop_index_list.append(j)
                elif None in additional_infor_df.iloc[j,0][0]:
                    drop_index_list.append(j)
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '2213110425'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [13]:
            # Problem 13: payload is a 2-element list of ints; digits string
            # must equal '21'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) != 2:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0]:
                        if type(n) != int:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            verify_str = '21'
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [14]:
            # Problem 14: payload is a 6-element list of ints; digits string
            # must equal '121223'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0]) != 6:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0]:
                        if type(n) != int:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '121223'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [15]:
            # Problem 15: triply-nested int lists (inner length 3); after
            # flattening, each of the three digits must be '0' or '2'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0])==0 or type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0])==0 or type(additional_infor_df.iloc[j,0][0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0][0])!=3:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0][0][0]:
                        if type(n) != int:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_list = ['0', '2']
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if len(l)==3 and l[0] in verify_list and l[1] in verify_list and l[2] in verify_list else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [16]:
            # Problem 16: triply-nested int lists (inner length 6); correct
            # iff every flattened digit is '0' or '2' (2s are mapped to 0
            # before comparing to '000000').
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0])==0 or type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0])==0 or type(additional_infor_df.iloc[j,0][0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0][0])!=6:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0][0][0]:
                        if type(n) != int:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '000000'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l.replace('2','0')==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [17]:
            # Problem 17: inner 7-element 0/1 vector; flattened digits must
            # equal '1010011001'.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0])==0 or type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0])!=7:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0][0]:
                        if n not in [0, 1]:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', "".join(re.findall(r"\d+", str(list_temp))))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '1010011001'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [18]:
            # Problem 18: 0/1 vector plus grouping; the *whole* structure
            # (stringified, not digit-flattened) must match one of the two
            # accepted repr strings in verify_list.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0])==0 or type(additional_infor_df.iloc[j,0][0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0][0])<1:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0][0]:
                        if n not in [0, 1]:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', str(list_temp))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_list = ['[[0, 0, 1, 1, 0, 1, 0], [[1, 0], [0]]]','[[1, 1, 0, 0, 1, 0, 1], [[0], [1, 0]]]']
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l in verify_list else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        elif i in [19]:
            # Problem 19: [vector-of-0/1/2, int, int, flag] payload; the
            # stringified structure must equal the expected repr exactly.
            drop_index_list = []
            for j in range(len(additional_infor_df)):
                if additional_infor_df.iloc[j,0]== None:
                    drop_index_list.append(j)
                elif type(additional_infor_df.iloc[j,0]) != list:
                    drop_index_list.append(j)
                elif len(additional_infor_df.iloc[j,0])!=4 or type(additional_infor_df.iloc[j,0][0]) != list or type(additional_infor_df.iloc[j,0][1]) != int or type(additional_infor_df.iloc[j,0][2]) != int:
                    drop_index_list.append(j)
                else:
                    for n in additional_infor_df.iloc[j,0][0]:
                        if n not in [0, 1, 2]:
                            drop_index_list.append(j)
                            break
            additional_infor_df = additional_infor_df.drop(drop_index_list)
            additional_infor_df = additional_infor_df.reset_index(drop=True)
            for row in range(len(additional_infor_df)):
                list_temp = additional_infor_df.loc[row, 'list']
                if list_temp!=None:
                    additional_infor_df._set_value(row,'list', str(list_temp))
                else:
                    additional_infor_df._set_value(row,'list', '')
            grouped = additional_infor_df.groupby('list')['count'].sum()
            additional_infor_df = pd.DataFrame({'list':[index for index in grouped.index]})
            additional_infor_df.insert(len(additional_infor_df.columns), 'count', list( grouped.iloc[:]))
            additional_infor_df.insert(len(additional_infor_df.columns), 'ratio', [int(l)*100/float(self.row_num) for l in list( grouped.iloc[:])])
            verify_str = '[[1, 0, 1, 2], 1, 16, True]'
            additional_infor_df.insert(len(additional_infor_df.columns), 'success', ['1' if l==verify_str else '0' for l in additional_infor_df.iloc[:,0] ])
            # Same accuracy derivation as in the i in [0, 1] branch.
            if len(list(additional_infor_df.groupby('success'))) == 2:
                if list(additional_infor_df.groupby('success'))[1][0] == '1':
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[1]/self.row_num)
                else:
                    accuracy_list.append(additional_infor_df.groupby('success')['count'].sum().iloc[0]/self.row_num)
            elif len(list(additional_infor_df.groupby('success'))) == 1:
                if list(additional_infor_df.groupby('success'))[0][0] == '1':
                    accuracy_list.append(1.0)
                else:
                    accuracy_list.append(0.0)
            else:
                accuracy_list.append(0.0)
        # Persist all problems except 4, 20 and 21. NOTE(review): for i==4
        # (and any unmatched index) additional_infor_df still holds only the
        # raw list/count columns from the top of the loop — presumably those
        # indices never reach this point with i not in [4, 20, 21]; verify.
        if i not in [4, 20,21]:
            addition_list.append(additional_infor_df)
            tar_path = './output/acc/'+self.name+'_acc'
            if not os.path.exists(tar_path):
                os.makedirs(tar_path)
            additional_infor_df.to_excel(tar_path+'/' +str(i) + '_acc.xlsx')
    self.accuracy_list, self.addition_list = accuracy_list, addition_list
    return accuracy_list, addition_list
if __name__ == '__main__':
    # Load the raw answer/ticket export and build the analysis entity; the
    # commented-out lines are alternative junior/senior cohort runs.
    df = pd.read_excel('./data/ticket_user_mianyang.xlsx')
    # df_junior = pd.read_excel('./data/junior.xlsx')
    # df_senior = pd.read_excel('./data/senior.xlsx')
    data_entity = data_analysis(df)
    # data_entity_junior = data_analysis(df = df_junior, name = 'junior')
    # data_entity_senior = data_analysis(df = df_senior, name = 'senior')
| 61.661502
| 630
| 0.546642
| 6,568
| 51,734
| 4.039738
| 0.035018
| 0.257792
| 0.292164
| 0.102212
| 0.865752
| 0.848905
| 0.840727
| 0.825463
| 0.818038
| 0.811405
| 0
| 0.025697
| 0.323018
| 51,734
| 838
| 631
| 61.735084
| 0.73189
| 0.009278
| 0
| 0.715385
| 0
| 0.002564
| 0.046037
| 0.002869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023077
| false
| 0
| 0.015385
| 0.001282
| 0.067949
| 0.002564
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1ec7979de0fc17ac4271cfc3d5c8691949f08e08
| 14,457
|
py
|
Python
|
components/exp-test-agent/tests/loop/test_agent_loop.py
|
sfahad1414/AGENT
|
84069edc96b6190bb03ffd5099cbc8966061a563
|
[
"Apache-2.0"
] | 15
|
2020-05-06T16:17:56.000Z
|
2022-03-30T12:25:16.000Z
|
components/exp-test-agent/tests/loop/test_agent_loop.py
|
dionny/AGENT
|
8a833406b590e23623fcc67db99f6f964d002396
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:17:16.000Z
|
2022-02-10T06:35:58.000Z
|
components/exp-test-agent/tests/loop/test_agent_loop.py
|
dionny/AGENT
|
8a833406b590e23623fcc67db99f6f964d002396
|
[
"Apache-2.0"
] | 7
|
2020-04-07T18:47:55.000Z
|
2022-03-30T12:14:58.000Z
|
from abstraction.actionable_state import ActionableState
from loop.agent_loop import AgentLoop
from unittest.mock import Mock, patch
@patch(AgentLoop.__module__ + '.threading.Thread')
def test_agent_start(thread):
    """AgentLoop.start() must construct a worker thread and start it."""
    # Arrange: a loop wired up with mocked collaborators.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client, runner_client, page_analysis_client, flow_generator_client = (
        Mock(), Mock(), Mock(), Mock())
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    worker = Mock()
    thread.return_value = worker

    # Act.
    loop.start()

    # Assert: a Thread was created and its start() invoked.
    assert thread.called
    assert worker.start.called
@patch.dict(AgentLoop.__module__ + '.general_memory', {'SESSION_STOPPED': False}, clear=True)
def test_loop_start():
    """With the session running, loop_start launches the SUT and runs
    loop_iteration NUM_ITERATIONS times before ending the loop."""
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    # Act.
    with patch(AgentLoop.__module__ + '.AgentLoop.loop_iteration') as loop_iteration:
        with patch(AgentLoop.__module__ + '.AgentLoop.loop_end') as loop_end:
            loop.loop_start()
    # Assert.
    # BUG FIX: `mock.called_with(...)` is not a Mock assertion -- Mock
    # auto-creates it as a child mock, so the expression is always truthy and
    # the original assert could never fail. assert_called_with actually
    # verifies the launch argument.
    runner_client.launch.assert_called_with(sut_url)
    assert loop_iteration.call_count == AgentLoop.NUM_ITERATIONS
    assert loop_end.called
@patch.dict(AgentLoop.__module__ + '.general_memory', {'SESSION_STOPPED': True}, clear=True)
def test_session_stop():
    """When the session is already stopped, no iterations run but the loop
    still finalizes via loop_end."""
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    # Act.
    with patch(AgentLoop.__module__ + '.AgentLoop.loop_iteration') as loop_iteration:
        with patch(AgentLoop.__module__ + '.AgentLoop.loop_end') as loop_end:
            loop.loop_start()
    # Assert.
    # BUG FIX: replaced the vacuous `called_with(...)` (auto-created child
    # mock, always truthy) with a real Mock assertion.
    runner_client.launch.assert_called_with(sut_url)
    assert not loop_iteration.called
    assert loop_end.called
@patch.dict(AgentLoop.__module__ + '.general_memory', {'SESSION_STOPPED': False}, clear=True)
def test_runner_unable_to_launch():
    """If the runner cannot launch the SUT, neither iterations nor loop_end run."""
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    # Simulate launch failure.
    runner_client.launch.return_value = False
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    # Act.
    with patch(AgentLoop.__module__ + '.AgentLoop.loop_iteration') as loop_iteration:
        with patch(AgentLoop.__module__ + '.AgentLoop.loop_end') as loop_end:
            loop.loop_start()
    # Assert.
    # BUG FIX: replaced the vacuous `called_with(...)` (auto-created child
    # mock, always truthy) with a real Mock assertion.
    runner_client.launch.assert_called_with(sut_url)
    assert not loop_iteration.called
    assert not loop_end.called
def test_loop_end():
    """loop_end must quit the runner session."""
    # Arrange.
    sut_url, runner_url = "TEST", "TEST"
    form_expert_client, runner_client = Mock(), Mock()
    page_analysis_client, flow_generator_client = Mock(), Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)

    # Act.
    loop.loop_end()

    # Assert: the browser/runner session was terminated.
    assert runner_client.quit.called
@patch.dict(AgentLoop.__module__ + '.general_memory', {'SESSION_STOPPED': False}, clear=True)
def test_loop_lifecycle():
    """End-to-end lifecycle: launch, NUM_ITERATIONS iterations, then quit
    (loop_end is NOT patched here, so the real teardown must call quit)."""
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    # Act.
    with patch(AgentLoop.__module__ + '.AgentLoop.loop_iteration') as loop_iteration:
        loop.loop_start()
    # Assert.
    # BUG FIX: replaced the vacuous `called_with(...)` (auto-created child
    # mock, always truthy) with a real Mock assertion.
    runner_client.launch.assert_called_with(sut_url)
    assert loop_iteration.call_count == AgentLoop.NUM_ITERATIONS
    assert runner_client.quit.called
@patch.dict(AgentLoop.__module__ + '.general_memory', {'SESSION_STOPPED': False}, clear=True)
def test_loop_iteration_exception():
    """An exception in the first iteration aborts further iterations but the
    loop still finalizes via loop_end."""
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client)
    # Act.
    with patch(AgentLoop.__module__ + '.AgentLoop.loop_iteration') as loop_iteration:
        with patch(AgentLoop.__module__ + '.AgentLoop.loop_end') as loop_end:
            def exception_side_effect():
                raise Exception('test')
            loop_iteration.side_effect = exception_side_effect
            loop.loop_start()
    # Assert.
    # BUG FIX: replaced the vacuous `called_with(...)` (auto-created child
    # mock, always truthy) with a real Mock assertion.
    runner_client.launch.assert_called_with(sut_url)
    assert loop_iteration.call_count == 1
    assert loop_end.called
def test_loop_iteration_happy_path():
    """A generated flow is published twice and executed once; the priority
    memory is never consulted because flow generation succeeded.
    """
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    # Page-analysis fixture: one label candidate, one commit widget ('C1'),
    # one error-message widget ('E1').
    page_analysis = {
        'analysis': {
            'labelCandidates': ['Label_FirstName'],
            'COMMIT': ['C1'],
            'ERRORMESSAGE': ['E1'],
            'errorMessages': ['E1']
        }
    }
    page_analysis_client.run_analysis.return_value = page_analysis
    # Concrete-state widgets describing a minimal form: label, text input,
    # save button, error label.
    widget_first_name_label = {
        'key': 'Label_FirstName',
        'label': 'FirstNameLabel',
        'actions': [],
        'selector': '#lblFirstName',
        'properties': {
            'tagName': 'LABEL',
            'text': 'First Name',
            'x': 10,
            'y': 10
        }
    }
    widget_first_name = {
        'key': 'FIRSTNAME',
        'label': 'FirstName',
        'actions': ['set'],
        'selector': '#firstName',
        'properties': {
            'tagName': 'INPUT',
            'x': 20,
            'y': 20
        }
    }
    widget_save = {
        'key': 'C1',
        'label': 'Save',
        'actions': ['click'],
        'selector': '#save',
        'properties': {
            'tagName': 'BUTTON',
            'x': 40,
            'y': 40
        }
    }
    widget_error = {
        'key': 'E1',
        'label': 'Error',
        'properties': {
            'tagName': 'LABEL',
            'x': 60,
            'y': 60
        }
    }
    target_concrete_state = {
        'widgets': {
            'Label_FirstName': widget_first_name_label,
            'FIRSTNAME': widget_first_name,
            'C1': widget_save,
            'E1': widget_error
        }
    }
    # Flow DSL string the generator "produces" for this page.
    flow_generator_client.generate_flow.return_value = "OBSERVE TEXTBOX FIRSTNAME " \
                                                      "TRY VALID FIRSTNAME " \
                                                      "CLICK COMMIT " \
                                                      "NOTOBSERVE ERRORMESSAGE"
    runner_client.concrete_state.return_value = target_concrete_state
    flow_publisher = Mock()
    flow_executor = Mock()
    loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                     page_analysis_client, flow_generator_client, flow_publisher, flow_executor)
    # Act.
    with patch(AgentLoop.__module__ + '.PriorityMemory') as memory_mock:
        actual_memory_mock = Mock()
        memory_mock.return_value = actual_memory_mock
        loop.loop_iteration()
    # Assert.
    assert flow_generator_client.generate_flow.called
    # presumably one publish for the planned flow and one for the result --
    # TODO confirm against AgentLoop.loop_iteration.
    assert flow_publisher.publish.call_count == 2
    assert flow_executor.execute.call_count == 1
    assert not actual_memory_mock.in_memory.called
def test_loop_iteration_no_generated_test_flows_should_explore():
    """When the flow generator returns None, the loop falls back to
    memory-driven exploration: it picks a widget from PriorityMemory, asks
    the form expert for a concrete value, and performs an action directly.
    """
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    # Same page-analysis / widget fixtures as the happy-path test.
    page_analysis = {
        'analysis': {
            'labelCandidates': ['Label_FirstName'],
            'COMMIT': ['C1'],
            'ERRORMESSAGE': ['E1'],
            'errorMessages': ['E1']
        }
    }
    page_analysis_client.run_analysis.return_value = page_analysis
    widget_first_name_label = {
        'key': 'Label_FirstName',
        'label': 'FirstNameLabel',
        'actions': [],
        'selector': '#lblFirstName',
        'properties': {
            'tagName': 'LABEL',
            'text': 'First Name',
            'x': 10,
            'y': 10
        }
    }
    widget_first_name = {
        'key': 'FIRSTNAME',
        'label': 'FirstName',
        'actions': ['set'],
        'selector': '#firstName',
        'properties': {
            'tagName': 'INPUT',
            'x': 20,
            'y': 20
        }
    }
    widget_save = {
        'key': 'C1',
        'label': 'Save',
        'actions': ['click'],
        'selector': '#save',
        'properties': {
            'tagName': 'BUTTON',
            'x': 40,
            'y': 40
        }
    }
    widget_error = {
        'key': 'E1',
        'label': 'Error',
        'properties': {
            'tagName': 'LABEL',
            'x': 60,
            'y': 60
        }
    }
    target_concrete_state = {
        'widgets': {
            'Label_FirstName': widget_first_name_label,
            'FIRSTNAME': widget_first_name,
            'C1': widget_save,
            'E1': widget_error
        }
    }
    # No flow generated -> exploration path.
    flow_generator_client.generate_flow.return_value = None
    runner_client.concrete_state.return_value = target_concrete_state
    flow_publisher = Mock()
    flow_executor = Mock()
    # Act.
    with patch(AgentLoop.__module__ + '.PriorityMemory') as memory_mock:
        actual_memory_mock = Mock()
        memory_mock.return_value = actual_memory_mock
        # Memory recommends exploring the text input.
        actual_memory_mock.choose_widget.return_value = widget_first_name
        loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                         page_analysis_client, flow_generator_client, flow_publisher, flow_executor)
        loop.loop_iteration()
    # Assert: no flow published/executed; exploration machinery used instead.
    assert flow_generator_client.generate_flow.called
    assert not flow_publisher.publish.called
    assert not flow_executor.execute.called
    assert actual_memory_mock.in_memory.called
    assert actual_memory_mock.update_memory.called
    assert form_expert_client.get_concrete_value.called
    assert runner_client.perform_action.called
class ConcreteTestFlowStub:
    """Minimal stand-in for a concrete test flow: a fixed hash and a no-op
    hash recomputation, so queued-flow lookups behave deterministically."""

    def __init__(self):
        # Constant hash; never recomputed.
        self.hash = 0

    def calculate_hash(self):
        # Intentionally a no-op -- the stub's hash must stay stable.
        pass
@patch.dict(AgentLoop.__module__ + '.celery_memory', {'HASH': [ConcreteTestFlowStub()]}, clear=True)
def test_loop_iteration_no_generated_test_flow_but_flows_in_queue():
    """When no flow is generated but a queued flow exists whose key matches
    the abstract state's hash, the queued flow is executed -- no publishing
    and no exploration.
    """
    # Arrange.
    sut_url = "TEST"
    runner_url = "TEST"
    form_expert_client = Mock()
    runner_client = Mock()
    page_analysis_client = Mock()
    flow_generator_client = Mock()
    # Same page-analysis / widget fixtures as the other iteration tests.
    page_analysis = {
        'analysis': {
            'labelCandidates': ['Label_FirstName'],
            'COMMIT': ['C1'],
            'ERRORMESSAGE': ['E1'],
            'errorMessages': ['E1']
        }
    }
    page_analysis_client.run_analysis.return_value = page_analysis
    widget_first_name_label = {
        'key': 'Label_FirstName',
        'label': 'FirstNameLabel',
        'actions': [],
        'selector': '#lblFirstName',
        'properties': {
            'tagName': 'LABEL',
            'text': 'First Name',
            'x': 10,
            'y': 10
        }
    }
    widget_first_name = {
        'key': 'FIRSTNAME',
        'label': 'FirstName',
        'actions': ['set'],
        'selector': '#firstName',
        'properties': {
            'tagName': 'INPUT',
            'x': 20,
            'y': 20
        }
    }
    widget_save = {
        'key': 'C1',
        'label': 'Save',
        'actions': ['click'],
        'selector': '#save',
        'properties': {
            'tagName': 'BUTTON',
            'x': 40,
            'y': 40
        }
    }
    widget_error = {
        'key': 'E1',
        'label': 'Error',
        'properties': {
            'tagName': 'LABEL',
            'x': 60,
            'y': 60
        }
    }
    target_concrete_state = {
        'widgets': {
            'Label_FirstName': widget_first_name_label,
            'FIRSTNAME': widget_first_name,
            'C1': widget_save,
            'E1': widget_error
        }
    }
    # Abstract state the (mocked) StateAbstracter will return.
    abstract_state = ActionableState()
    abstract_state.add_static_widget(widget_first_name_label)
    abstract_state.add_widget(widget_first_name)
    abstract_state.add_widget(widget_save)
    abstract_state.add_widget(widget_error)
    # Matches the 'HASH' key patched into celery_memory above, so the queued
    # stub flow applies to this state.
    abstract_state.hash = "HASH"
    flow_generator_client.generate_flow.return_value = None
    runner_client.concrete_state.return_value = target_concrete_state
    flow_publisher = Mock()
    flow_executor = Mock()
    # Act.
    with patch(AgentLoop.__module__ + '.StateAbstracter') as state_abstracter:
        with patch(AgentLoop.__module__ + '.PriorityMemory') as memory_mock:
            actual_memory_mock = Mock()
            memory_mock.return_value = actual_memory_mock
            actual_mapper = Mock()
            state_abstracter.return_value = actual_mapper
            actual_mapper.process.return_value = abstract_state
            loop = AgentLoop(sut_url, runner_url, form_expert_client, runner_client,
                             page_analysis_client, flow_generator_client, flow_publisher, flow_executor)
            loop.loop_iteration()
    # Assert: queued flow executed; nothing published, no exploration.
    assert flow_generator_client.generate_flow.called
    assert not flow_publisher.publish.called
    assert flow_executor.execute.called
    assert not actual_memory_mock.in_memory.called
    assert not actual_memory_mock.update_memory.called
    assert not form_expert_client.get_concrete_value.called
    assert not runner_client.perform_action.called
| 28.236328
| 104
| 0.605105
| 1,500
| 14,457
| 5.433333
| 0.094667
| 0.04908
| 0.060614
| 0.035092
| 0.866503
| 0.840123
| 0.83092
| 0.807362
| 0.786994
| 0.786994
| 0
| 0.007083
| 0.287127
| 14,457
| 511
| 105
| 28.291585
| 0.783718
| 0.015148
| 0
| 0.750656
| 0
| 0
| 0.126628
| 0.008798
| 0
| 0
| 0
| 0
| 0.094488
| 1
| 0.034121
| false
| 0.002625
| 0.007874
| 0
| 0.044619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1ed0478960f290427614380e0bea489b36313f29
| 2,596
|
py
|
Python
|
bidaf/tests/check_results.py
|
davidgolub/QuestionGeneration
|
6b31e1a8855774230051093ca24ba0a7750a6712
|
[
"MIT"
] | 117
|
2017-09-06T23:25:59.000Z
|
2021-06-29T12:24:26.000Z
|
bidaf/tests/check_results.py
|
davidgolub/QuestionGeneration
|
6b31e1a8855774230051093ca24ba0a7750a6712
|
[
"MIT"
] | 14
|
2017-12-06T21:08:28.000Z
|
2020-06-22T06:03:23.000Z
|
bidaf/tests/check_results.py
|
davidgolub/QuestionGeneration
|
6b31e1a8855774230051093ca24ba0a7750a6712
|
[
"MIT"
] | 33
|
2017-10-06T05:16:07.000Z
|
2021-05-10T00:30:13.000Z
|
import pickle
import gzip


def _report(save_path):
    """Load one gzipped pickle of eval results and print its path, f1 and acc.

    Uses a context manager so the file handle is closed even if unpickling
    fails (the original opened/closed manually, leaking on error).
    """
    with gzip.open(save_path, 'rb') as f:
        res = pickle.load(f)
    print(save_path)
    print(res['f1'])
    print(res['acc'])


# The original repeated the identical load/print body six times; the runs are
# now data: (path template, checkpoint steps), in the original print order.
_RUNS = [
    # out/basic/19/eval -- integer steps; the template zero-pads them itself.
    ('out/basic/19/eval/test-0%s000.pklz',
     [42, 43, 44, 45, 46, 47, 48, 49, 51, 52, 53, 54]),
    ('out/basic/17/eval/test-%s.pklz', ['041000']),
    ('out/basic/14/eval/test-%s.pklz',
     ['041000', '042000', '043000', '044000', '045000']),
    # out/basic/25/eval
    ('out/basic/14/eval/dev-%s.pklz',
     ['044000', '045000', '046000', '047000', '048000', '049000', '050000',
      '051000', '052000']),
    ('out/basic/18/eval/dev-%s.pklz', ['041000', '042000']),
    ('out/basic/17/eval/dev-%s.pklz',
     ['041000', '042000', '043000', '044000', '045000', '046000', '047000',
      '048000', '049000']),
]

for template, steps in _RUNS:
    for step in steps:
        _report(template % step)
| 39.333333
| 236
| 0.684515
| 468
| 2,596
| 3.758547
| 0.134615
| 0.172825
| 0.163729
| 0.095509
| 0.891984
| 0.891984
| 0.816941
| 0.816941
| 0.816941
| 0.816941
| 0
| 0.186924
| 0.080894
| 2,596
| 66
| 237
| 39.333333
| 0.550293
| 0.424884
| 0
| 0.72
| 0
| 0
| 0.259411
| 0.123888
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.36
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1ed35ed5dcd7ea859b179eac8530879a11250d2c
| 13,603
|
py
|
Python
|
tests/test_finder.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 8
|
2020-08-25T01:33:29.000Z
|
2021-02-21T12:01:03.000Z
|
tests/test_finder.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 2
|
2020-10-20T13:05:05.000Z
|
2020-10-21T00:19:32.000Z
|
tests/test_finder.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 3
|
2020-10-20T12:10:15.000Z
|
2020-12-05T00:36:10.000Z
|
import unittest
from profanity import get_profanity
class TestFinder(unittest.TestCase):
    """Tests for profanity.get_profanity on short and very long inputs."""

    def test_finder_false_small(self):
        """A clean short sentence yields no matches."""
        self.assertEqual(
            get_profanity(
                "This doesn't have any profanity, why do I bother"), []
        )

    def test_finder_true_small(self):
        """A single obfuscated profanity ('sh!t') is found in a short sentence."""
        self.assertEqual(
            get_profanity("This code is definitely not sh!t"), ['sh!t']
        )

    def test_finder_false_large(self):
        """A longer clean paragraph yields no matches."""
        self.assertEqual(
            get_profanity(
                "lorem ipsum dolor sit amet i write this text by hand, poetry ain't my skill more likely it is to kill!. what the heck am i doing with my life? i thought to myself, but then i thought - maybe there is a point in all of this? a hidden treasure? no, there is not."),
            []
        )

    def test_finder_no_duplicates_large(self):
        """Default mode: each profanity is reported once even if it occurs many
        times in a large text (note substring hits like 'ass' inside 'massa')."""
        self.assertEqual(
            get_profanity(
                "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut fringilla erat ut cursus suscipit. Curabitur odio metus, varius vitae felis eget, euismod tincidunt felis. Quisque justo nisl, gravida ut pulvinar sed, suscipit lobortis nibh. Aliquam vestibulum eleifend est, et dapibus leo aliquam id. Morbi porta sodales mauris, in fermentum sapien blandit nec. Cras turpis massa, efficitur eu euismod id, euismod ac ex. Ut luctus justo lectus, eget lacinia diam semper ac. Aliquam erat volutpat. sh!t habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Sed pharetra metus eu quam aliquet, quis tempor eros vulputate.Sed orci purus, aliquet sed facilisis ut, mollis et nisl. Curabitur finibus porttitor nisl vitae vestibulum. Duis tincidunt tempor maximus. Vestibulum in ante porttitor, luctus purus in, tempor lacus. Integer tempus, lorem eget rhoncus tristique, tortor orci laoreet felis, ac dapibus magna purus ac lectus. Integer blandit sit amet velit sed cursus. Proin justo ex, luctus sit amet sapien elementum, iaculis interdum lacus. Proin nec turpis quis leo fringilla consequat. Aliquam erat volutpat. Nam accumsan lorem ut justo consectetur, vel pulvinar risus congue.Vivamus eget eleifend libero. Suspendisse lobortis id nisi eu consectetur. Curabitur aliquam sed justo ut efficitur. Donec vel tristique leo. Suspendisse eget ipsum et sapien semper tempus quis ac elit. Aliquam quis ornare ante, et varius velit. Aenean egestas mattis aliquam. Fusce leo lacus, luctus in sagittis in, tempor quis augue. Suspendisse potenti. Nam scelerisque sapien ligula, eget finibus justo aliquam in. Etiam sollicitudin dapibus mauris, id ultricies diam mattis non. Praesent a varius dui. Curabitur eget urna sit amet ante consectetur dapibus a ut dolor. Proin ac vehicula nisl, sed scelerisque risus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent ultricies risus dapibus nibh egestas, ac tristique velit faucibus.Nunc vel neque sapien. Phasellus quis nunc ut orci sh!t suscipit sed non nibh. Integer vehicula non justo eu finibus. Duis porttitor imperdiet felis, vitae tristique lorem consectetur et. Integer eu ligula id metus dignissim tempor vel vitae tortor. Nulla ac auctor dui. Nam tempus imperdiet elit non ultrices. In facilisis molestie ante, in eleifend ipsum mattis eget. Vivamus ultricies, nulla non dapibus condimentum, lorem velit aliquet nisi, ut sollicitudin tortor sem ac est. Proin suscipit tristique enim sit amet cursus.Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Praesent bibendum eleifend tellus vel egestas. Cras feugiat at leo sit amet pulvinar. Integer egestas at dolor vitae viverra. Phasellus ut velit id quam fermentum porta sed ac justo. Etiam ut luctus erat. Nulla vel turpis eu tortor eleifend iaculis. Fusce quis purus magna. Aliquam erat volutpat. In consequat nunc vel lobortis sh!t. Donec feugiat ullamcorper nisi, in laoreet nulla tincidunt a. Sed ullamcorper orci aliquam ligula varius efficitur.Sed erat augue, cursus euismod lorem eu, pretium accumsan quam. Donec nec felis a augue laoreet pretium ut ut quam. Proin tincidunt orci eu tellus bibendum ultricies ut at eros. Maecenas ac egestas purus, et vulputate turpis. Nullam nibh massa, pretium eget vestibulum a, tempor euismod neque. Nullam suscipit lorem augue, venenatis blandit nulla malesuada vitae. Praesent ut odio interdum, commodo quam eget, sodales urna.Etiam ac tincidunt velit. Aenean facilisis lacus massa, eu rhoncus mauris aliquet eget. Vestibulum elementum eros ac lectus sagittis molestie. Duis tellus turpis, dapibus eu tincidunt et, suscipit at leo. Integer hendrerit ligula dolor, non vulputate ante blandit ut. Curabitur sollicitudin arcu leo, quis porttitor metus fringilla at. Sed et erat ut eros molestie auctor nec eget justo. Aenean lobortis eleifend nulla, id aliquet magna ullamcorper et. In at tellus sapien. Ut interdum gravida diam in facilisis.Morbi non dictum neque. Praesent laoreet, lectus nec blandit mattis, neque nibh elementum nisi, eget venenatis mi ex eget turpis. Ut tempor erat tincidunt, dictum diam sit amet, accumsan magna. In efficitur augue sapien, sed lobortis orci tincidunt sed. Etiam rutrum mollis tellus bibendum volutpat. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Quisque risus dolor, consequat interdum ex non, pharetra dapibus diam. Curabitur non mauris sed orci laoreet venenatis. Fusce vehicula blandit finibus. Maecenas magna lacus, porttitor ut luctus vitae, mattis eget arcu.Aliquam elementum odio eget ligula venenatis euismod. Aliquam a dui sed metus sollicitudin pharetra non ut nulla. Proin posuere, enim id sollicitudin pellentesque, lectus enim ornare felis, a hendrerit nibh nisl non augue. Ut egestas accumsan ante, vitae pharetra nisl. Aliquam semper a massa sed bibendum. Maecenas sh!t orci eu purus accumsan viverra hendrerit sed urna. Ut non porta elit. In hac habitasse platea dictumst. Aliquam erat volutpat. Phasellus eget orci pulvinar, varius lacus sed, sodales justo. Curabitur porta tempor urna id efficitur. Morbi eget augue venenatis elit varius varius porttitor in augue. In efficitur laoreet arcu, vitae ultrices est accumsan sit amet. In facilisis egestas venenatis. Nunc sed neque et magna feugiat auctor. Aenean elementum augue quis porta tempus.In elementum sapien a nisi hendrerit, non dictum velit sagittis. Proin venenatis est tellus, quis ultricies lectus porttitor id. Duis id facilisis urna, ac eleifend erat. Sed malesuada erat non felis condimentum, vel vulputate nibh fermentum. Aenean bibendum magna sit amet urna rutrum porttitor. Ut eget nisl condimentum, mollis orci vel, faucibus purus. Suspendisse euismod dignissim sapien sit amet iaculis. Etiam sh!t porttitor felis. In ut varius massa. Sed vel felis eu eros maximus maximus. sh!t ultricies ligula sed purus semper malesuada. Morbi efficitur dictum mattis. Nullam viverra porttitor arcu, ut iaculis eros vehicula eu."),
            ['ass', 'cum', 'cums', 'sh!t', 'tit']
        )

    def test_finder_duplicates_large(self):
        """duplicates=True: every occurrence is reported, so the same words
        appear repeatedly in the result."""
        self.assertEqual(
            get_profanity(
                "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut fringilla erat ut cursus suscipit. Curabitur odio metus, varius vitae felis eget, euismod tincidunt felis. Quisque justo nisl, gravida ut pulvinar sed, suscipit lobortis nibh. Aliquam vestibulum eleifend est, et dapibus leo aliquam id. Morbi porta sodales mauris, in fermentum sapien blandit nec. Cras turpis massa, efficitur eu euismod id, euismod ac ex. Ut luctus justo lectus, eget lacinia diam semper ac. Aliquam erat volutpat. sh!t habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Sed pharetra metus eu quam aliquet, quis tempor eros vulputate.Sed orci purus, aliquet sed facilisis ut, mollis et nisl. Curabitur finibus porttitor nisl vitae vestibulum. Duis tincidunt tempor maximus. Vestibulum in ante porttitor, luctus purus in, tempor lacus. Integer tempus, lorem eget rhoncus tristique, tortor orci laoreet felis, ac dapibus magna purus ac lectus. Integer blandit sit amet velit sed cursus. Proin justo ex, luctus sit amet sapien elementum, iaculis interdum lacus. Proin nec turpis quis leo fringilla consequat. Aliquam erat volutpat. Nam accumsan lorem ut justo consectetur, vel pulvinar risus congue.Vivamus eget eleifend libero. Suspendisse lobortis id nisi eu consectetur. Curabitur aliquam sed justo ut efficitur. Donec vel tristique leo. Suspendisse eget ipsum et sapien semper tempus quis ac elit. Aliquam quis ornare ante, et varius velit. Aenean egestas mattis aliquam. Fusce leo lacus, luctus in sagittis in, tempor quis augue. Suspendisse potenti. Nam scelerisque sapien ligula, eget finibus justo aliquam in. Etiam sollicitudin dapibus mauris, id ultricies diam mattis non. Praesent a varius dui. Curabitur eget urna sit amet ante consectetur dapibus a ut dolor. Proin ac vehicula nisl, sed scelerisque risus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent ultricies risus dapibus nibh egestas, ac tristique velit faucibus.Nunc vel neque sapien. Phasellus quis nunc ut orci sh!t suscipit sed non nibh. Integer vehicula non justo eu finibus. Duis porttitor imperdiet felis, vitae tristique lorem consectetur et. Integer eu ligula id metus dignissim tempor vel vitae tortor. Nulla ac auctor dui. Nam tempus imperdiet elit non ultrices. In facilisis molestie ante, in eleifend ipsum mattis eget. Vivamus ultricies, nulla non dapibus condimentum, lorem velit aliquet nisi, ut sollicitudin tortor sem ac est. Proin suscipit tristique enim sit amet cursus.Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Praesent bibendum eleifend tellus vel egestas. Cras feugiat at leo sit amet pulvinar. Integer egestas at dolor vitae viverra. Phasellus ut velit id quam fermentum porta sed ac justo. Etiam ut luctus erat. Nulla vel turpis eu tortor eleifend iaculis. Fusce quis purus magna. Aliquam erat volutpat. In consequat nunc vel lobortis sh!t. Donec feugiat ullamcorper nisi, in laoreet nulla tincidunt a. Sed ullamcorper orci aliquam ligula varius efficitur.Sed erat augue, cursus euismod lorem eu, pretium accumsan quam. Donec nec felis a augue laoreet pretium ut ut quam. Proin tincidunt orci eu tellus bibendum ultricies ut at eros. Maecenas ac egestas purus, et vulputate turpis. Nullam nibh massa, pretium eget vestibulum a, tempor euismod neque. Nullam suscipit lorem augue, venenatis blandit nulla malesuada vitae. Praesent ut odio interdum, commodo quam eget, sodales urna.Etiam ac tincidunt velit. Aenean facilisis lacus massa, eu rhoncus mauris aliquet eget. Vestibulum elementum eros ac lectus sagittis molestie. Duis tellus turpis, dapibus eu tincidunt et, suscipit at leo. Integer hendrerit ligula dolor, non vulputate ante blandit ut. Curabitur sollicitudin arcu leo, quis porttitor metus fringilla at. Sed et erat ut eros molestie auctor nec eget justo. Aenean lobortis eleifend nulla, id aliquet magna ullamcorper et. In at tellus sapien. Ut interdum gravida diam in facilisis.Morbi non dictum neque. Praesent laoreet, lectus nec blandit mattis, neque nibh elementum nisi, eget venenatis mi ex eget turpis. Ut tempor erat tincidunt, dictum diam sit amet, accumsan magna. In efficitur augue sapien, sed lobortis orci tincidunt sed. Etiam rutrum mollis tellus bibendum volutpat. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Quisque risus dolor, consequat interdum ex non, pharetra dapibus diam. Curabitur non mauris sed orci laoreet venenatis. Fusce vehicula blandit finibus. Maecenas magna lacus, porttitor ut luctus vitae, mattis eget arcu.Aliquam elementum odio eget ligula venenatis euismod. Aliquam a dui sed metus sollicitudin pharetra non ut nulla. Proin posuere, enim id sollicitudin pellentesque, lectus enim ornare felis, a hendrerit nibh nisl non augue. Ut egestas accumsan ante, vitae pharetra nisl. Aliquam semper a massa sed bibendum. Maecenas sh!t orci eu purus accumsan viverra hendrerit sed urna. Ut non porta elit. In hac habitasse platea dictumst. Aliquam erat volutpat. Phasellus eget orci pulvinar, varius lacus sed, sodales justo. Curabitur porta tempor urna id efficitur. Morbi eget augue venenatis elit varius varius porttitor in augue. In efficitur laoreet arcu, vitae ultrices est accumsan sit amet. In facilisis egestas venenatis. Nunc sed neque et magna feugiat auctor. Aenean elementum augue quis porta tempus.In elementum sapien a nisi hendrerit, non dictum velit sagittis. Proin venenatis est tellus, quis ultricies lectus porttitor id. Duis id facilisis urna, ac eleifend erat. Sed malesuada erat non felis condimentum, vel vulputate nibh fermentum. Aenean bibendum magna sit amet urna rutrum porttitor. Ut eget nisl condimentum, mollis orci vel, faucibus purus. Suspendisse euismod dignissim sapien sit amet iaculis. Etiam sh!t porttitor felis. In ut varius massa. Sed vel felis eu eros maximus maximus. sh!t ultricies ligula sed purus semper malesuada. Morbi efficitur dictum mattis. Nullam viverra porttitor arcu, ut iaculis eros vehicula eu.",
                duplicates=True),
            ['ass', 'cum', 'cums', 'sh!t', 'tit', 'ass', 'ass', 'ass', 'ass', 'ass', 'ass', 'cum', 'cum', 'cum', 'cum',
             'cum', 'cums', 'cums', 'cums', 'cums', 'cums', 'sh!t', 'sh!t', 'sh!t', 'sh!t', 'sh!t', 'tit', 'tit', 'tit',
             'tit', 'tit', 'tit', 'tit', 'tit', 'tit']
        )
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| 302.288889
| 6,050
| 0.786003
| 2,010
| 13,603
| 5.304478
| 0.125871
| 0.005909
| 0.014256
| 0.006753
| 0.956856
| 0.955168
| 0.952167
| 0.944663
| 0.940724
| 0.940724
| 0
| 0
| 0.16908
| 13,603
| 44
| 6,051
| 309.159091
| 0.943289
| 0
| 0
| 0.257143
| 0
| 0.085714
| 0.925142
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.057143
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a2050d9fcced680c4907e9b8cd141eb99b462800
| 8,932
|
py
|
Python
|
app/engine/item_components/aoe_components.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
app/engine/item_components/aoe_components.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
app/engine/item_components/aoe_components.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
from app.data.item_components import ItemComponent
from app.data.components import Type
from app.utilities import utils
from app.engine import target_system, skill_system
from app.engine.game_state import game
class BlastAOE(ItemComponent):
    """Item component that adds a Manhattan-radius blast area of effect."""
    nid = 'blast_aoe'
    desc = "Gives Blast AOE"
    tag = 'aoe'

    expose = Type.Int  # Radius
    value = 1

    def _get_power(self, unit) -> int:
        # Effective radius: configured value + 1, plus any skill-based bonus.
        bonus = skill_system.empower_splash(unit)
        return self.value + 1 + bonus

    def splash(self, unit, item, position) -> tuple:
        """Return (main_target_position_or_None, list of splashed unit positions)."""
        from app.engine import item_system
        radii = set(range(self._get_power(unit)))
        tiles = target_system.find_manhattan_spheres(radii, position[0], position[1])
        tiles = {pos for pos in tiles if game.tilemap.check_bounds(pos)}
        if item_system.is_spell(unit, item):
            # spell blast: the centre tile is part of the splash; no main target
            occupants = (game.board.get_unit(pos) for pos in tiles)
            return None, [occ.position for occ in occupants if occ]
        # regular blast: centre is the main target, splash excludes it
        occupants = (game.board.get_unit(pos) for pos in tiles if pos != position)
        hit = [occ.position for occ in occupants if occ]
        main_target = position if game.board.get_unit(position) else None
        return main_target, hit

    def splash_positions(self, unit, item, position) -> set:
        """All in-bounds tiles the blast covers, regardless of occupancy."""
        radii = set(range(self._get_power(unit)))
        tiles = target_system.find_manhattan_spheres(radii, position[0], position[1])
        return {pos for pos in tiles if game.tilemap.check_bounds(pos)}
class EnemyBlastAOE(BlastAOE, ItemComponent):
    """Blast AOE that affects enemy units only."""
    nid = 'enemy_blast_aoe'
    desc = "Gives Blast AOE that only hits enemies"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Like BlastAOE.splash, but only enemies of `unit` are splashed."""
        from app.engine import item_system, skill_system
        ranges = set(range(self._get_power(unit)))
        splash = target_system.find_manhattan_spheres(ranges, position[0], position[1])
        # Consistency: use the tilemap helper (as BlastAOE and
        # splash_positions do) rather than re-implementing the bounds test.
        splash = {pos for pos in splash if game.tilemap.check_bounds(pos)}
        if item_system.is_spell(unit, item):
            # spell blast
            splash = [game.board.get_unit(s) for s in splash]
            splash = [s.position for s in splash if s and skill_system.check_enemy(unit, s)]
            return None, splash
        else:
            # regular blast
            splash = [game.board.get_unit(s) for s in splash if s != position]
            # BUG FIX: check_enemy was called with only one argument here,
            # which would raise a TypeError at runtime; pass the candidate
            # unit `s` as the spell branch and splash_positions do.
            splash = [s.position for s in splash if s and skill_system.check_enemy(unit, s)]
            return position if game.board.get_unit(position) else None, splash

    def splash_positions(self, unit, item, position) -> set:
        """Tiles to highlight: in-range, in-bounds, and empty or enemy-occupied."""
        from app.engine import skill_system
        ranges = set(range(self._get_power(unit)))
        splash = target_system.find_manhattan_spheres(ranges, position[0], position[1])
        splash = {pos for pos in splash if game.tilemap.check_bounds(pos)}
        # Doesn't highlight allies positions
        splash = {pos for pos in splash if not game.board.get_unit(pos) or skill_system.check_enemy(unit, game.board.get_unit(pos))}
        return splash
class AllyBlastAOE(BlastAOE, ItemComponent):
    """Blast AOE whose splash only affects units allied with the user."""
    nid = 'ally_blast_aoe'
    desc = "Gives Blast AOE that only hits allies"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Return (None, splash): no main target, allies in range are splash."""
        from app.engine import skill_system
        radii = set(range(self._get_power(unit)))
        tiles = target_system.find_manhattan_spheres(radii, position[0], position[1])
        tiles = {tile for tile in tiles if game.tilemap.check_bounds(tile)}
        occupants = [game.board.get_unit(tile) for tile in tiles]
        allies = [occ.position for occ in occupants if occ and skill_system.check_ally(unit, occ)]
        return None, allies
class EquationBlastAOE(BlastAOE, ItemComponent):
    """Blast AOE whose radius is evaluated from a game equation per unit."""
    nid = 'equation_blast_aoe'
    desc = "Gives Equation-Sized Blast AOE"
    tag = 'aoe'

    expose = Type.Equation  # Radius
    value = None

    def _get_power(self, unit) -> int:
        # Radius comes from the named equation evaluated for this unit,
        # plus 1, plus any skill bonus.
        from app.engine import equations
        base = equations.parser.get(self.value, unit)
        return base + 1 + skill_system.empower_splash(unit)
class EnemyCleaveAOE(ItemComponent):
    """AOE covering the 8 tiles around the user, hitting enemies only."""
    nid = 'enemy_cleave_aoe'
    desc = "Gives Enemy Cleave AOE"
    tag = 'aoe'

    def _adjacent_positions(self, unit, position) -> set:
        """In-bounds tiles of the 8 squares around the user, minus the target tile.

        Extracted because splash() and splash_positions() previously
        duplicated this position-set construction verbatim.
        """
        x, y = unit.position
        candidates = {(x + dx, y + dy)
                      for dx in (-1, 0, 1)
                      for dy in (-1, 0, 1)
                      if (dx, dy) != (0, 0)}
        candidates = {pos for pos in candidates if game.tilemap.check_bounds(pos)}
        candidates.discard(position)
        return candidates

    def splash(self, unit, item, position) -> tuple:
        """Return (main_target, splash): enemies adjacent to the user are splash."""
        from app.engine import skill_system
        splash = [game.board.get_unit(pos) for pos in self._adjacent_positions(unit, position)]
        splash = [s.position for s in splash if s and skill_system.check_enemy(unit, s)]
        main_target = position if game.board.get_unit(position) else None
        return main_target, splash

    def splash_positions(self, unit, item, position) -> set:
        """Return adjacent tiles to highlight, excluding ally-occupied ones."""
        from app.engine import skill_system
        splash = self._adjacent_positions(unit, position)
        # Doesn't highlight allies positions
        splash = {pos for pos in splash if not game.board.get_unit(pos) or skill_system.check_enemy(unit, game.board.get_unit(pos))}
        return splash
class AllAlliesAOE(ItemComponent):
    """AOE that affects every allied unit on the map, including the user."""
    nid = 'all_allies_aoe'
    desc = "Item affects all allies on the map including self"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Return (None, splash): no main target; splash is every on-map ally."""
        from app.engine import skill_system
        splash = [u.position for u in game.units if u.position and skill_system.check_ally(unit, u)]
        return None, splash

    def splash_positions(self, unit, item, position) -> set:
        """Return every map position (the whole map is highlighted).

        Fixed to actually return a set — the annotation and every sibling
        component's splash_positions use sets, but this returned a list.
        """
        splash = {(x, y) for x in range(game.tilemap.width) for y in range(game.tilemap.height)}
        return splash
class AllAlliesExceptSelfAOE(ItemComponent):
    """AOE that affects every allied unit on the map except the user."""
    nid = 'all_allies_except_self_aoe'
    desc = "Item affects all allies on the map except user"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Return (None, splash): every on-map ally other than the user."""
        from app.engine import skill_system
        splash = [u.position for u in game.units if u.position and skill_system.check_ally(unit, u) and u is not unit]
        return None, splash

    def splash_positions(self, unit, item, position) -> set:
        """Return every map position (the whole map is highlighted).

        Fixed to actually return a set — the annotation and every sibling
        component's splash_positions use sets, but this returned a list.
        """
        splash = {(x, y) for x in range(game.tilemap.width) for y in range(game.tilemap.height)}
        return splash
class AllEnemiesAOE(ItemComponent):
    """AOE that affects every enemy unit on the map."""
    nid = 'all_enemies_aoe'
    desc = "Item affects all enemies on the map"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Return (None, splash): no main target; splash is every on-map enemy."""
        from app.engine import skill_system
        hostile = [u.position for u in game.units
                   if u.position and skill_system.check_enemy(unit, u)]
        return None, hostile

    def splash_positions(self, unit, item, position) -> set:
        """Return map tiles to highlight, excluding ally-occupied ones."""
        from app.engine import skill_system
        # All positions
        every_tile = {(x, y)
                      for x in range(game.tilemap.width)
                      for y in range(game.tilemap.height)}
        # Doesn't highlight allies positions
        highlight = set()
        for pos in every_tile:
            occupant = game.board.get_unit(pos)
            if not occupant or skill_system.check_enemy(unit, occupant):
                highlight.add(pos)
        return highlight
class LineAOE(ItemComponent):
    """AOE along the ray traced from the user's tile to the target tile."""
    nid = 'line_aoe'
    desc = "Gives Line AOE"
    tag = 'aoe'

    def splash(self, unit, item, position) -> tuple:
        """Return (None, splash): units standing on the traced line are splash."""
        line = set(utils.raytrace(unit.position, position))
        line.discard(unit.position)
        occupants = (game.board.get_unit(tile) for tile in line)
        return None, [occ.position for occ in occupants if occ]

    def splash_positions(self, unit, item, position) -> set:
        """Return the traced line's tiles (user's own tile excluded)."""
        line = set(utils.raytrace(unit.position, position))
        line.discard(unit.position)
        return line
| 42.942308
| 133
| 0.609718
| 1,200
| 8,932
| 4.425833
| 0.088333
| 0.019582
| 0.032009
| 0.048202
| 0.824327
| 0.811335
| 0.78215
| 0.780079
| 0.761627
| 0.726605
| 0
| 0.011512
| 0.290081
| 8,932
| 207
| 134
| 43.149758
| 0.826053
| 0.023735
| 0
| 0.686747
| 0
| 0
| 0.052706
| 0.003059
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10241
| false
| 0
| 0.096386
| 0
| 0.554217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
a20bf7fce9b728f58f60e677354a340b2cf6233f
| 15,186
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/jumbo/phys/phys_studio_wisun_fan_1_0.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 69
|
2021-12-16T01:34:09.000Z
|
2022-03-31T08:27:39.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/jumbo/phys/phys_studio_wisun_fan_1_0.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/jumbo/phys/phys_studio_wisun_fan_1_0.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 21
|
2021-12-20T09:05:45.000Z
|
2022-03-28T02:52:28.000Z
|
from pyradioconfig.calculator_model_framework.interfaces.iphy import IPhy
class PhysStudioWisunFanJumbo(IPhy):
    """Wi-SUN FAN 1.0 PHY definitions for the Jumbo part family.

    Every public PHY_* method configures one Wi-SUN FAN PHY. The methods
    were 17 near-identical bodies differing only in six values, so the
    shared construction is factored into _make_wisun_phy; all public
    signatures are unchanged.
    """

    # Default xtal frequency of 38.4MHz
    _XTAL_FREQUENCY_HZ = 38400000

    def _make_wisun_phy(self, model, phy_name, readable_name, wisun_mode,
                        wisun_reg_domain, operating_class,
                        base_frequency_hz, channel_spacing_hz):
        """Build one Wi-SUN FAN 1.0 PHY from the values that vary per PHY.

        :param model: radio-config model the PHY is attached to
        :param phy_name: optional override for the generated PHY name
        :param readable_name: human-readable PHY description shown in Studio
        :param wisun_mode: entry of model.vars.wisun_mode.var_enum
        :param wisun_reg_domain: entry of model.vars.wisun_reg_domain.var_enum
        :param operating_class: Wi-SUN operating class number
        :param base_frequency_hz: channel-0 center frequency in Hz
        :param channel_spacing_hz: channel spacing in Hz
        :return: the configured phy object
        """
        phy = self._makePhy(model, model.profiles.wisun_fan_1_0,
                            readable_name=readable_name, phy_name=phy_name)
        # Wi-SUN Inputs
        phy.profile_inputs.wisun_mode.value = wisun_mode
        phy.profile_inputs.wisun_reg_domain.value = wisun_reg_domain
        phy.profile_inputs.wisun_operating_class.value = operating_class
        # Default xtal frequency of 38.4MHz
        phy.profile_inputs.xtal_frequency_hz.value = self._XTAL_FREQUENCY_HZ
        # Temporary redundant inputs for base frequency and channel spacing (required due to Studio UI limitations)
        phy.profile_inputs.base_frequency_hz.value = base_frequency_hz
        phy.profile_inputs.channel_spacing_hz.value = channel_spacing_hz
        return phy

    ### PHYs Tested by Apps ###
    def PHY_IEEE802154_WISUN_868MHz_2GFSK_50kbps_1a_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-868MHz, 1a (2FSK 50kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode1a,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=1, base_frequency_hz=863100000, channel_spacing_hz=100000)

    def PHY_IEEE802154_WISUN_873MHz_2GFSK_50kbps_1a_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-873MHz, 1a (2FSK 50kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode1a,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=3, base_frequency_hz=870100000, channel_spacing_hz=100000)

    def PHY_IEEE802154_WISUN_866MHz_2GFSK_50kbps_1a_IN(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, IN-866MHz, 1a (2FSK 50kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode1a,
            model.vars.wisun_reg_domain.var_enum.IN,
            operating_class=1, base_frequency_hz=865100000, channel_spacing_hz=100000)

    def PHY_IEEE802154_WISUN_915MHz_2GFSK_50kbps_1b_NA(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, NA-915MHz, 1b (2FSK 50kbps mi=1.0)',
            model.vars.wisun_mode.var_enum.Mode1b,
            model.vars.wisun_reg_domain.var_enum.NA,
            operating_class=1, base_frequency_hz=902200000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_470MHz_2GFSK_50kbps_1b_CN(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, CN-470MHz, 1b (2FSK 50kbps mi=1.0)',
            model.vars.wisun_mode.var_enum.Mode1b,
            model.vars.wisun_reg_domain.var_enum.CN,
            operating_class=1, base_frequency_hz=470200000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_868MHz_2GFSK_100kbps_2a_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-868MHz, 2a (2FSK 100kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode2a,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=2, base_frequency_hz=863100000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_873MHz_2GFSK_100kbps_2a_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-873MHz, 2a (2FSK 100kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode2a,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=4, base_frequency_hz=870200000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_866MHz_2GFSK_100kbps_2a_IN(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, IN-866MHz, 2a (2FSK 100kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode2a,
            model.vars.wisun_reg_domain.var_enum.IN,
            operating_class=2, base_frequency_hz=865100000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_915MHz_2GFSK_100kbps_2a_NA(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, NA-915MHz, 2a (2FSK 100kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode2a,
            model.vars.wisun_reg_domain.var_enum.NA,
            operating_class=1, base_frequency_hz=902200000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_915MHz_2GFSK_150kbps_3_NA(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, NA-915MHz, 3 (2FSK 150kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode3,
            model.vars.wisun_reg_domain.var_enum.NA,
            operating_class=2, base_frequency_hz=902400000, channel_spacing_hz=400000)

    def PHY_IEEE802154_WISUN_920MHz_2GFSK_100kbps_2b_JP(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, JP-920MHz, 2b (2FSK 100kbps mi=1.0)',
            model.vars.wisun_mode.var_enum.Mode2b,
            model.vars.wisun_reg_domain.var_enum.JP,
            operating_class=2, base_frequency_hz=920900000, channel_spacing_hz=400000)

    def PHY_IEEE802154_WISUN_868MHz_2GFSK_150kbps_3_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-868MHz, 3 (2FSK 150kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode3,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=2, base_frequency_hz=863100000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_873MHz_2GFSK_150kbps_3_EU(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, EU-873MHz, 3 (2FSK 150kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode3,
            model.vars.wisun_reg_domain.var_enum.EU,
            operating_class=4, base_frequency_hz=870200000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_866MHz_2GFSK_150kbps_3_IN(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, IN-866MHz, 3 (2FSK 150kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode3,
            model.vars.wisun_reg_domain.var_enum.IN,
            operating_class=2, base_frequency_hz=865100000, channel_spacing_hz=200000)

    def PHY_IEEE802154_WISUN_915MHz_2GFSK_200kbps_4a_NA(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, NA-915MHz, 4a (2GFSK 200kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode4a,
            model.vars.wisun_reg_domain.var_enum.NA,
            operating_class=2, base_frequency_hz=902400000, channel_spacing_hz=400000)

    def PHY_IEEE802154_WISUN_920MHz_2GFSK_200kbps_4b_JP(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, JP-920MHz, 4b (2GFSK 200kbps mi=1.0)',
            model.vars.wisun_mode.var_enum.Mode4b,
            model.vars.wisun_reg_domain.var_enum.JP,
            operating_class=3, base_frequency_hz=920800000, channel_spacing_hz=600000)

    def PHY_IEEE802154_WISUN_915MHz_2GFSK_300kbps_5_NA(self, model, phy_name=None):
        return self._make_wisun_phy(
            model, phy_name, 'Wi-SUN FAN, NA-915MHz, 5 (2GFSK 300kbps mi=0.5)',
            model.vars.wisun_mode.var_enum.Mode5,
            model.vars.wisun_reg_domain.var_enum.NA,
            operating_class=3, base_frequency_hz=902600000, channel_spacing_hz=600000)
| 49.145631
| 149
| 0.721454
| 2,175
| 15,186
| 4.747126
| 0.051494
| 0.098789
| 0.158063
| 0.103729
| 0.966005
| 0.966005
| 0.96339
| 0.96339
| 0.956416
| 0.956416
| 0
| 0.075103
| 0.203872
| 15,186
| 309
| 150
| 49.145631
| 0.778908
| 0.173713
| 0
| 0.727811
| 0
| 0
| 0.063406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100592
| false
| 0
| 0.005917
| 0
| 0.213018
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a20fbafdc832bd90fa2ba3529e194ba79e990927
| 2,051
|
py
|
Python
|
tests/test_nmpc.py
|
APLA-Toolbox/pymapf
|
255df006925401e5ccdf82afc7dac339221574ba
|
[
"MIT"
] | 25
|
2021-01-17T01:02:25.000Z
|
2022-02-13T09:20:59.000Z
|
tests/test_nmpc.py
|
APLA-Toolbox/pymapf
|
255df006925401e5ccdf82afc7dac339221574ba
|
[
"MIT"
] | 37
|
2021-01-16T22:36:32.000Z
|
2021-11-15T11:51:59.000Z
|
tests/test_nmpc.py
|
APLA-Toolbox/pymapf
|
255df006925401e5ccdf82afc7dac339221574ba
|
[
"MIT"
] | 5
|
2021-04-02T08:27:52.000Z
|
2021-11-17T12:43:52.000Z
|
# -*- coding: utf-8 -*-
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from pymapf.decentralized.nmpc.nmpc import MultiAgentNMPC
from pymapf.decentralized.position import Position
def test_nmpc_agents():
    """Registering three agents should yield exactly three tracked agents."""
    planner = MultiAgentNMPC()
    for name, start, goal in (
        ("toto", Position(1, 2), Position(4, 5)),
        ("tata", Position(2, 2), Position(7, 5)),
        ("titi", Position(2, 3), Position(4, 8)),
    ):
        planner.register_agent(name, start, goal)
    assert len(planner.agents) == 3
def test_nmpc_obstacles():
    """Registering a single obstacle should yield one obstacle object."""
    planner = MultiAgentNMPC()
    planner.register_obstacle(2, 3.14, Position(2, 3))
    assert len(planner.obstacles_objects) == 1
def test_sim_no_obstacles():
    """A simulation with agents and no obstacles should run to completion."""
    planner = MultiAgentNMPC()
    for name, start, goal in (
        ("toto", Position(1, 2), Position(4, 5)),
        ("tata", Position(2, 2), Position(7, 5)),
        ("titi", Position(2, 3), Position(4, 8)),
    ):
        planner.register_agent(name, start, goal)
    planner.run_simulation()
def test_sim_obstacles():
    """A simulation with agents and obstacles should run to completion."""
    planner = MultiAgentNMPC()
    for name, start, goal in (
        ("toto", Position(1, 2), Position(4, 5)),
        ("tata", Position(2, 2), Position(7, 5)),
        ("titi", Position(2, 3), Position(4, 8)),
    ):
        planner.register_agent(name, start, goal)
    planner.register_obstacle(2, 3.14, Position(2, 3))
    planner.register_obstacle(2, -3.14, Position(10, 7))
    planner.run_simulation()
def test_visualize_no_obs():
    """Visualization after an obstacle-free simulation should not raise."""
    planner = MultiAgentNMPC()
    for name, start, goal in (
        ("toto", Position(1, 2), Position(4, 5)),
        ("tata", Position(2, 2), Position(7, 5)),
        ("titi", Position(2, 3), Position(4, 8)),
    ):
        planner.register_agent(name, start, goal)
    planner.run_simulation()
    planner.visualize("toto", 10, 10)
def test_visualize_obs():
    """Visualization after a simulation with obstacles should not raise."""
    planner = MultiAgentNMPC()
    for name, start, goal in (
        ("toto", Position(1, 2), Position(4, 5)),
        ("tata", Position(2, 2), Position(7, 5)),
        ("titi", Position(2, 3), Position(4, 8)),
    ):
        planner.register_agent(name, start, goal)
    planner.register_obstacle(2, 3.14, Position(2, 3))
    planner.register_obstacle(2, -3.14, Position(10, 7))
    planner.run_simulation()
    planner.visualize("toto", 10, 10)
| 33.080645
| 67
| 0.680644
| 298
| 2,051
| 4.540268
| 0.147651
| 0.177384
| 0.18847
| 0.133038
| 0.777531
| 0.747228
| 0.747228
| 0.747228
| 0.716186
| 0.691057
| 0
| 0.059298
| 0.153096
| 2,051
| 61
| 68
| 33.622951
| 0.719632
| 0.010239
| 0
| 0.711111
| 0
| 0
| 0.033531
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 1
| 0.133333
| false
| 0
| 0.088889
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
44c8baa40c0818b00dbedf0031e7d3fa0a55f720
| 154
|
py
|
Python
|
testing-python/tests/test_sum_pytest.py
|
hejnal/code-retreat-prep
|
2c9d8af0c8c2ad2cf79f5684c65ec0a118dcd805
|
[
"Apache-2.0"
] | null | null | null |
testing-python/tests/test_sum_pytest.py
|
hejnal/code-retreat-prep
|
2c9d8af0c8c2ad2cf79f5684c65ec0a118dcd805
|
[
"Apache-2.0"
] | null | null | null |
testing-python/tests/test_sum_pytest.py
|
hejnal/code-retreat-prep
|
2c9d8af0c8c2ad2cf79f5684c65ec0a118dcd805
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
def test_sum():
    """sum() over a four-element list of ints."""
    total = sum([1, 2, 3, 4])
    assert total == 10, "Should be 10"
def test_tuple():
    """sum() over a three-element tuple of ints."""
    total = sum((1, 2, 3))
    assert total == 6, "Should be 6"
| 22
| 50
| 0.558442
| 29
| 154
| 2.896552
| 0.586207
| 0.166667
| 0.238095
| 0.261905
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 0.220779
| 154
| 7
| 51
| 22
| 0.591667
| 0.12987
| 0
| 0
| 0
| 0
| 0.171642
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
44db410b3f9aa029090d7951099821b975de487b
| 23,740
|
py
|
Python
|
fix.py
|
zetatez/obcc
|
2ce7a41de3c2314d2ced088761b8190b4ad5faa5
|
[
"MIT"
] | null | null | null |
fix.py
|
zetatez/obcc
|
2ce7a41de3c2314d2ced088761b8190b4ad5faa5
|
[
"MIT"
] | null | null | null |
fix.py
|
zetatez/obcc
|
2ce7a41de3c2314d2ced088761b8190b4ad5faa5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author: Lorenzo
Email : zetatez@icloud.com
"""
import queue
import threading
import config as cf
from log import lg
from db_conn_pool import ConnPool
class DBFixTBNE(object):
    """Repair tables that exist on only one side of the src/dst pair.

    Tables present only in dst ('dst-src') are dropped from dst; tables
    present only in src ('src-dst') are recreated in dst and filled from
    src. Work is fanned out to worker threads, throttled by a bounded
    queue of size CONCURRENCY_TABLES.
    """

    def __init__(self, db_tbs_diff):
        super(DBFixTBNE, self).__init__()
        self.__db_name_src = cf.DB_SRC.db_name
        self.__db_name_dst = cf.DB_DST.db_name
        # self.__db_name_diff = cf.DB_DIFF.db_name
        self.__concurrency_tables = cf.CONCURRENCY_TABLES
        # Bounded queues throttle how many table workers run at once.
        self.que_Dst = queue.Queue(self.__concurrency_tables)
        self.que_Src = queue.Queue(self.__concurrency_tables)
        self.worker_list_DBFixTBNESrc = []
        self.worker_list_DBFixTBNEDst = []
        self.db_tbs_diff = db_tbs_diff
        self.run()

    def run(self):
        """Drop dst-only tables, then recreate and fill src-only tables."""
        lg.info("DBFixTBNESrc: src: {}, dst: {}".format(self.__db_name_src, self.__db_name_dst))
        for table in self.db_tbs_diff.get('dst-src', []):
            self.que_Src.put(1)  # blocks once the concurrency limit is hit
            worker = WorkerFixTBNESrc(table, self.que_Src)
            worker.start()
            self.worker_list_DBFixTBNESrc.append(worker)
        self.que_Src.join()
        lg.info("DBFixTBNEDst: src: {}, dst: {}".format(self.__db_name_src, self.__db_name_dst))
        for table in self.db_tbs_diff.get('src-dst', []):
            self.que_Dst.put(1)
            worker = WorkerFixTBNEDst(table, self.que_Dst)
            worker.start()
            self.worker_list_DBFixTBNEDst.append(worker)
        self.que_Dst.join()
class WorkerFixTBNESrc(threading.Thread):
    """Worker thread that drops one dst-only table from the destination DB."""

    def __init__(self, tb_name_dst, que):
        threading.Thread.__init__(self)
        self.__db_dst = cf.DB_DST
        self.__db_name_dst = cf.DB_DST.db_name
        self.__tb_name_dst = tb_name_dst
        self.que = que

    def run(self):
        """Drop the table, then release the slot in the bounded queue."""
        try:
            lg.info("Worker Fix TB NE Src: {}".format(self.__tb_name_dst))
            self.__task()
        except Exception as e:
            lg.error("Worker failed: {}".format(e))
        finally:
            # Free a concurrency slot whether or not the drop succeeded.
            self.que.get()
            self.que.task_done()

    def __task(self):
        """Issue DROP TABLE against the destination database."""
        sql = "DROP TABLE IF EXISTS {};".format(self.__tb_name_dst)
        try:
            conn_pool = ConnPool(self.__db_dst)
            conn_pool.query(sql)
            conn_pool.commit()
            conn_pool.dispose()
        except Exception as e:
            lg.error("Drop table {} from dst failed: {}".format(self.__tb_name_dst, e))
class WorkerFixTBNEDst(threading.Thread):
def __init__(self, tb_name_src, que):
threading.Thread.__init__(self)
self.__db_name_src = cf.DB_SRC.db_name
self.__db_name_dst = cf.DB_DST.db_name
self.__db_name_diff = cf.DB_DIFF.db_name
self.__db_src = cf.DB_SRC
self.__db_dst = cf.DB_DST
self.__db_diff = cf.DB_DIFF
self.__db_src_info = cf.DB_SRC_INFO
self.__db_dst_info = cf.DB_DST_INFO
self.__tb_name_src = tb_name_src
self.__chunk_size = cf.CHUNK_SIZE
self.que = que
self.__tb_struct = ""
self.__idx_src = 0
self.tb_keys = []
self.tb_cols = []
def run(self):
try:
lg.info("Worker Fix TB NE Dst: {}".format(self.__tb_name_src))
self.__task()
except Exception as e:
lg.error("WorkerFixTBNEDst failed: {}".format(e))
finally:
self.que.get()
self.que.task_done()
def __task(self):
"""__task: sync src table to dst
"""
self.__get_tb_cols()
self.__get_tb_cols_type()
self.__get_ukpk_from_src()
self.__get_tables_stuct_from_src()
if self.__tb_struct:
res = self.__create_tb_in_dst()
if res:
while self.__idx_src != -1:
chunk = self.__get_next_chunk_from_src()
if chunk:
self.__dump_chunk_to_dst(chunk)
def __get_tb_cols(self):
"""__get_colo_from_src:
"""
sql = "SELECT COLUMN_NAME FROM COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s"
res = []
try:
pool_src_info = ConnPool(self.__db_src_info)
res = pool_src_info.fetchall(sql, (self.__db_name_src, self.__tb_name_src))
pool_src_info.dispose()
except Exception as e:
lg.error("get cols failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
if len(res) == 0:
lg.error("get cols failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
self.tb_cols = []
else:
self.tb_cols = [x[0].decode() for x in res]
def __get_tb_cols_type(self):
"""__get_tb_cols_type:
"""
sql = "select column_name, data_type from columns where table_schema='{}' and table_name='{}'".format(self.__db_name_src, self.__tb_name_src)
res = []
try:
pool_dst_info = ConnPool(self.__db_src_info)
res = pool_dst_info.fetchall(sql)
pool_dst_info.dispose()
except Exception as e:
lg.error("Get tb cols type failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
if len(res) == 0:
lg.error("Get tb cols type failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
self.tb_cols_mp = {}
else:
self.tb_cols_mp = dict([[y.decode() for y in x] for x in res])
def __get_ukpk_from_src(self):
"""_get_ukpk_from_src:
"""
sql_keys = "show index from " + self.__tb_name_src + " where Key_name='PRIMARY'"
res = []
try:
pool_src = ConnPool(self.__db_src)
res = pool_src.fetchall(sql_keys)
if len(res) == 0:
sql_keys = "show index from " + self.__tb_name_src + " where Non_unique=0"
res = pool_src.fetchall(sql_keys)
pool_src.dispose()
except Exception as e:
lg.error("Get uk or pk failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
if len(res) == 0:
lg.error("Get uk or pk failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
self.tb_keys = []
else:
self.tb_keys = [x[4].decode() for x in res]
def __get_tables_stuct_from_src(self):
"""__get_tables_stuct_from_src:
"""
sql = "show create table {}".format(self.__tb_name_src)
res = ""
try:
pool_src = ConnPool(self.__db_src)
res = pool_src.fetchall(sql)
pool_src.dispose()
except Exception as e:
lg.error("Get table struct failed: {}".format(e))
res = [x[1].decode().replace("\n", "") for x in res]
if not res:
lg.error("Get table struct failed for table: {}".format(self.__tb_name_src))
self.__tb_struct = res[0] if res else ""
def __create_tb_in_dst(self):
"""__create_tb_in_dst:
"""
if self.__tb_struct:
try:
pool_dst = ConnPool(self.__db_dst)
pool_dst.query(self.__tb_struct)
pool_dst.commit()
pool_dst.dispose()
except Exception as e:
lg.error("Create table {} in dst failed: {}".format(self.__tb_name_src, e))
return False
return True
else:
return False
def __get_next_chunk_from_src(self):
"""get_next_chunk_from_src
"""
sql = "select {} from {} order by {} asc limit %s,%s".format(",".join(self.tb_cols), self.__tb_name_src, " asc,".join(self.tb_cols))
res = []
try:
pool_src = ConnPool(self.__db_src)
res = pool_src.fetchall(sql, (self.__idx_src, self.__chunk_size))
pool_src.dispose()
except Exception as e:
lg.error("get next chunk from src failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
self.__idx_src = self.__idx_src + self.__chunk_size if len(res) == self.__chunk_size else -1
ress = []
if len(res) == 0:
lg.error("get next chunk from src failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
ress = []
else:
for x in res:
l = []
for y in x:
if isinstance(y, bytearray):
l.append(y.decode())
elif isinstance(y, int) or isinstance(y, float):
l.append(y)
else:
# anything else not be known by far
l.append(y)
ress.append(l)
return ress
def __dump_chunk_to_dst(self, chunk_from_src):
    """Insert a chunk of source rows into the destination pool.

    Builds one INSERT per row by string concatenation, executes them
    against the dst connection and commits once at the end.
    """
    def parser(row):
        """Render one row as a comma-separated SQL VALUES fragment.

        Quoting is chosen from self.tb_cols_mp type info; anything that
        is not int/float falls back to single-quoted string form.
        """
        values = ""
        for key, val in zip(self.tb_cols, row):
            key_type = self.tb_cols_mp.get(key, "")
            if key_type == "int" or key_type == "float":
                values = values + str(val) + ","
            elif key_type == "str" or key_type == 'char':
                values = values + "'" + str(val) + "'" + ","
            else:
                # anything else not be known by far
                values = values + "'" + str(val) + "'" + ","
        values = values.strip(",")
        return values
    # NOTE(review): the INSERT targets self.__tb_name_src although the pool
    # is the *dst* database -- presumably src and dst table names are
    # identical; confirm against the caller.
    # NOTE(review): values are spliced without escaping, so a value that
    # contains a quote breaks the statement (SQL-injection-shaped risk).
    sql = "insert into {}({}) values({})".format(self.__tb_name_src, ','.join(self.tb_cols), "{}")
    try:
        pool_dst = ConnPool(self.__db_dst)
        for row in chunk_from_src:
            values = parser(row)
            sql_exec = sql.format(values)
            pool_dst.query(sql_exec)
        pool_dst.commit()
        pool_dst.dispose()
    except Exception as e:
        lg.error("dump to dst failed for {}.{}: {}".format(self.__db_name_dst, self.__tb_name_dst, e))
class DBFixTBUB(object):
    """Fix table-level row imbalance between the src and dst databases.

    For every diff entry this first spawns workers that delete surplus
    rows from dst, waits for them, then spawns workers that copy missing
    rows over from src. A bounded Queue caps concurrent workers at
    ``cf.CONCURRENCY_TABLES``. Work starts immediately on construction.
    """
    def __init__(self, diff):
        super(DBFixTBUB, self).__init__()
        self.__db_name_src = cf.DB_SRC.db_name
        self.__db_name_dst = cf.DB_DST.db_name
        self.__concurrency_tables = cf.CONCURRENCY_TABLES
        self.que_Dst = queue.Queue(self.__concurrency_tables)
        self.que_Src = queue.Queue(self.__concurrency_tables)
        self.worker_list_DBFixTBUBSrc = []
        self.worker_list_DBFixTBUBDst = []
        self.diff = diff
        self.run()

    def run(self):
        """Run the dst-delete pass, then the src-copy pass."""
        lg.info("DBFixTBUB DST: src: {}, dst: {}".format(self.__db_name_src, self.__db_name_dst))
        for entry in self.diff:
            if entry.get('ct_dst', 0) != 0:
                # put() blocks while the queue is full, throttling workers.
                self.que_Dst.put(1)
                worker = WorkerFixTBUBDst(entry.get("tb_dst"), self.que_Dst)
                worker.start()
                self.worker_list_DBFixTBUBDst.append(worker)
        self.que_Dst.join()
        lg.info("DBFixTBUB SRC: src: {}, dst: {}".format(self.__db_name_src, self.__db_name_dst))
        for entry in self.diff:
            if entry.get('ct_src', 0) != 0:
                self.que_Src.put(1)
                worker = WorkerFixTBUBSrc(entry.get("tb_src"), entry.get("tb_dst"), self.que_Src)
                worker.start()
                self.worker_list_DBFixTBUBSrc.append(worker)
        self.que_Src.join()
class WorkerFixTBUBDst(threading.Thread):
    """Worker that deletes rows flagged 0 in the diff table from dst.

    Pages through the diff table in chunks and issues one DELETE per diff
    row against the destination table. Its concurrency slot in the queue
    handed in by DBFixTBUB is released when the worker finishes.
    """
    def __init__(self, tb_name_dst, que):
        threading.Thread.__init__(self)
        self.__db_name_src = cf.DB_SRC.db_name
        self.__db_name_dst = cf.DB_DST.db_name
        self.__db_name_diff = cf.DB_DIFF.db_name
        self.__db_src = cf.DB_SRC
        self.__db_dst = cf.DB_DST
        self.__db_diff = cf.DB_DIFF
        self.__db_src_info = cf.DB_SRC_INFO
        self.__db_dst_info = cf.DB_DST_INFO
        self.__chunk_size = cf.CHUNK_SIZE
        self.__tb_name_diff = cf.DB_DIFF_TABLE
        self.__tb_name_dst = tb_name_dst
        self.que = que
        self.__idx_diff = 0   # paging offset into the diff table
        self.tb_cols_mp = {}  # column name -> data type of the dst table

    def run(self):
        try:
            self.__task()
        except Exception as e:
            lg.error("Worker failed: {}".format(e))
        finally:
            # Release our concurrency slot whether the task succeeded or not.
            self.que.get()
            self.que.task_done()

    def __task(self):
        """__task: delete from dst, chunk by chunk, until paging ends."""
        lg.info("Worker Fix TB UB DST: dst: {}.{}".format(self.__db_name_dst, self.__tb_name_dst))
        self.__get_tb_cols_type()
        while self.__idx_diff != -1:
            chunk_from_diff = self.__get_next_chunk_from_diff()
            self.__delete_from_dst_with_chunk_from_diff(chunk_from_diff)

    def __get_tb_cols_type(self):
        """Load the dst table's column-name -> data-type map.

        Queries the ``columns`` table through the dst-info pool --
        presumably that pool points at information_schema; confirm.
        """
        sql = "select column_name, data_type from columns where table_schema='{}' and table_name='{}'".format(self.__db_name_dst, self.__tb_name_dst)
        res = []
        try:
            pool_dst_info = ConnPool(self.__db_dst_info)
            res = pool_dst_info.fetchall(sql)
            pool_dst_info.dispose()
        except Exception as e:
            lg.error("Get tb cols type failed for {}.{}: {}".format(self.__db_name_dst, self.__tb_name_dst, e))
        if len(res) == 0:
            lg.error("Get tb cols type failed for {}.{}".format(self.__db_name_dst, self.__tb_name_dst))
            self.tb_cols_mp = {}
        else:
            self.tb_cols_mp = dict([[y.decode() for y in x] for x in res])

    def __get_next_chunk_from_diff(self):
        """Fetch the next ordered chunk of flag=0 diff rows.

        Advances self.__idx_diff (set to -1 after a short final chunk)
        and returns the rows with all cells decoded to str.
        """
        sql = "select db_name,tb_name,tb_keys,tb_keys_val from {} where flag = {} order by db_name asc, tb_name asc, tb_keys asc, tb_keys_val asc limit %s,%s".format(self.__tb_name_diff, 0)
        res = []
        try:
            pool_src = ConnPool(self.__db_diff)
            res = pool_src.fetchall(sql, (self.__idx_diff, self.__chunk_size))
            pool_src.dispose()
        except Exception as e:
            lg.error("get next chunk from diff failed for {}.{}: {}".format(self.__db_name_diff, self.__tb_name_diff, e))
        # A full chunk means more rows may follow; a short one ends paging.
        self.__idx_diff = self.__idx_diff + self.__chunk_size if len(res) == self.__chunk_size else -1
        if len(res) == 0:
            lg.error("get next chunk from diff failed for {}.{}".format(self.__db_name_diff, self.__tb_name_diff))
            res = []
        else:
            res = [[y.decode() for y in x] for x in res]
        return res

    def __delete_from_dst_with_chunk_from_diff(self, chunk_from_diff):
        """Delete each diff row's counterpart from the dst table."""
        def parser(row):
            """Build the WHERE clause for one diff row.

            tb_keys / tb_keys_val are '#'-separated key column names and
            their values.
            """
            db_name, tb_name, tb_keys, tb_keys_val = row
            conds = []
            for key, val in zip(tb_keys.split("#"), tb_keys_val.split("#")):
                key_type = self.tb_cols_mp.get(key, "")
                if key_type in ("int", "float"):
                    conds.append(key + "=" + val)
                else:
                    # str/char and *unknown* types are quoted. Previously a
                    # key with an unknown type was silently dropped from the
                    # clause, which could make the DELETE match far more
                    # rows than intended.
                    conds.append(key + "='" + val + "'")
            # BUG FIX: conditions were joined with "," (invalid SQL for a
            # composite key); they must be ANDed together.
            return " and ".join(conds)
        try:
            pool_dst = ConnPool(self.__db_dst)
            for row in chunk_from_diff:
                where_cond = parser(row)
                if not where_cond:
                    # Never issue an unconditioned / malformed DELETE.
                    lg.error("empty where clause for diff row {}, skipped".format(row))
                    continue
                sql = "delete from {} where {};".format(self.__tb_name_dst, where_cond)
                pool_dst.query(sql)
            pool_dst.commit()
            pool_dst.dispose()
        except Exception as e:
            lg.error("delete from dst table {} failed: {}".format(self.__tb_name_dst, e))
class WorkerFixTBUBSrc(threading.Thread):
    """Worker that copies rows flagged 1 in the diff table from src to dst.

    Pages through the diff table, looks each diff row up in the source
    table by its key columns and REPLACEs the full row into the
    destination table. The concurrency slot in the queue handed in by
    DBFixTBUB is released when the worker finishes.
    """
    def __init__(self, tb_name_src, tb_name_dst, que):
        threading.Thread.__init__(self)
        self.__db_name_src = cf.DB_SRC.db_name
        self.__db_name_dst = cf.DB_DST.db_name
        self.__db_name_diff = cf.DB_DIFF.db_name
        self.__db_src = cf.DB_SRC
        self.__db_dst = cf.DB_DST
        self.__db_diff = cf.DB_DIFF
        self.__db_src_info = cf.DB_SRC_INFO
        self.__db_dst_info = cf.DB_DST_INFO
        self.__chunk_size = cf.CHUNK_SIZE
        self.__tb_name_diff = cf.DB_DIFF_TABLE
        self.__tb_name_src = tb_name_src
        self.__tb_name_dst = tb_name_dst
        self.que = que
        self.__idx_diff = 0   # paging offset into the diff table
        self.tb_cols_mp = {}  # column name -> data type of the src table
        self.tb_cols = []     # ordered column names of the src table

    def run(self):
        try:
            self.__task()
        except Exception as e:
            lg.error("Worker failed: {}".format(e))
        finally:
            # Release our concurrency slot whether the task succeeded or not.
            self.que.get()
            self.que.task_done()

    def __task(self):
        """__task: copy every flagged row from src to dst, chunk by chunk."""
        lg.info("Worker Fix TB UB Src: src:{}.{} dst: {}.{}".format(self.__db_name_src, self.__tb_name_src, self.__db_name_dst, self.__tb_name_dst))
        self.__get_tb_cols()
        self.__get_tb_cols_type()
        while self.__idx_diff != -1:
            chunk_from_diff = self.__get_next_chunk_from_diff()
            chunk_from_src = self.__get_chunk_from_src_with_chunk_from_diff(chunk_from_diff)
            self.__dump_chunk_to_dst(chunk_from_src)

    def __get_tb_cols(self):
        """Load the ordered column names of the source table."""
        sql = "SELECT COLUMN_NAME FROM COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s"
        res = []
        try:
            pool_src_info = ConnPool(self.__db_src_info)
            res = pool_src_info.fetchall(sql, (self.__db_name_src, self.__tb_name_src))
            pool_src_info.dispose()
        except Exception as e:
            lg.error("get cols failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
        if len(res) == 0:
            lg.error("get cols failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
            self.tb_cols = []
        else:
            self.tb_cols = [x[0].decode() for x in res]

    def __get_tb_cols_type(self):
        """Load the src table's column-name -> data-type map.

        Queries the ``columns`` table through the src-info pool --
        presumably that pool points at information_schema; confirm.
        """
        sql = "select column_name, data_type from columns where table_schema='{}' and table_name='{}'".format(self.__db_name_src, self.__tb_name_src)
        res = []
        try:
            pool_src_info = ConnPool(self.__db_src_info)
            res = pool_src_info.fetchall(sql)
            pool_src_info.dispose()
        except Exception as e:
            lg.error("Get tb cols type failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
        if len(res) == 0:
            lg.error("Get tb cols type failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
            self.tb_cols_mp = {}
        else:
            self.tb_cols_mp = dict([[y.decode() for y in x] for x in res])

    def __get_next_chunk_from_diff(self):
        """Fetch the next ordered chunk of flag=1 diff rows.

        Advances self.__idx_diff (set to -1 after a short final chunk)
        and returns the rows with all cells decoded to str.
        """
        sql = "select db_name,tb_name,tb_keys,tb_keys_val from {} where flag = {} order by db_name asc, tb_name asc, tb_keys asc, tb_keys_val asc limit %s,%s".format(self.__tb_name_diff, 1)
        res = []
        try:
            pool_src = ConnPool(self.__db_diff)
            res = pool_src.fetchall(sql, (self.__idx_diff, self.__chunk_size))
            pool_src.dispose()
        except Exception as e:
            lg.error("get next chunk from diff failed for {}.{}: {}".format(self.__db_name_diff, self.__tb_name_diff, e))
        # A full chunk means more rows may follow; a short one ends paging.
        self.__idx_diff = self.__idx_diff + self.__chunk_size if len(res) == self.__chunk_size else -1
        if len(res) == 0:
            lg.error("get next chunk from diff failed for {}.{}".format(self.__db_name_diff, self.__tb_name_diff))
            res = []
        else:
            res = [[y.decode() for y in x] for x in res]
        return res

    def __get_chunk_from_src_with_chunk_from_diff(self, chunk):
        """Fetch the full source row matching each diff row's key values."""
        def parser(row):
            """Build an ANDed WHERE clause from a diff row's keys/values."""
            db_name, tb_name, tb_keys, tb_keys_val = row
            conds = []
            for key, val in zip(tb_keys.split("#"), tb_keys_val.split("#")):
                key_type = self.tb_cols_mp.get(key, "")
                if key_type in ("int", "float"):
                    conds.append(key + "=" + val)
                else:
                    # str/char and any unknown type are quoted as strings.
                    conds.append(key + "='" + val + "'")
            # BUG FIX: the original overwrote where_cond on every iteration
            # (where_cond = key + ...), so a composite key only matched on
            # its *last* column; conditions were also joined with "," which
            # is invalid SQL -- they must be ANDed.
            return " and ".join(conds)
        sql = "select {} from {} where {}".format(','.join(self.tb_cols), self.__tb_name_src, "{}")
        ck = []
        try:
            pool_src = ConnPool(self.__db_src)
            for row in chunk:
                where_cond = parser(row)
                if not where_cond:
                    # Never run an unconditioned SELECT for a broken diff row.
                    lg.error("get row failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
                    continue
                res = pool_src.fetchone(sql.format(where_cond))
                # BUG FIX: fetchone may return None when no row matches;
                # len(None) used to raise and abort the whole chunk.
                if not res:
                    lg.error("get row failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
                else:
                    ck.append([y.decode() if isinstance(y, bytearray) else y for y in res])
            pool_src.dispose()
        except Exception as e:
            lg.error("get chunk failed for {}.{}: {}".format(self.__db_name_src, self.__tb_name_src, e))
        if not ck:
            lg.error("get chunk failed for {}.{}".format(self.__db_name_src, self.__tb_name_src))
        return ck

    def __dump_chunk_to_dst(self, chunk_from_src):
        """REPLACE each fetched source row into the destination table."""
        def parser(row):
            """Render one row as a comma-separated SQL VALUES fragment."""
            parts = []
            for key, val in zip(self.tb_cols, row):
                key_type = self.tb_cols_mp.get(key, "")
                if key_type in ("int", "float"):
                    parts.append(str(val))
                else:
                    # str/char and unknown types fall back to single quotes.
                    parts.append("'" + str(val) + "'")
            return ",".join(parts)
        # NOTE(review): values are spliced without escaping, so a value that
        # contains a quote breaks the statement (SQL-injection-shaped risk).
        sql = "replace into {}({}) values({})".format(self.__tb_name_dst, ','.join(self.tb_cols), "{}")
        try:
            pool_dst = ConnPool(self.__db_dst)
            for row in chunk_from_src:
                pool_dst.query(sql.format(parser(row)))
            pool_dst.commit()
            pool_dst.dispose()
        except Exception as e:
            lg.error("dump to dst failed for {}.{}: {}".format(self.__db_name_dst, self.__tb_name_dst, e))
if __name__ == '__main__':
    # No standalone entry point: this module is driven by its importers.
    pass
| 38.044872
| 189
| 0.545114
| 3,106
| 23,740
| 3.722473
| 0.055699
| 0.047743
| 0.051029
| 0.037104
| 0.862394
| 0.850285
| 0.793115
| 0.763017
| 0.706885
| 0.686127
| 0
| 0.002347
| 0.335973
| 23,740
| 623
| 190
| 38.105939
| 0.731096
| 0.045956
| 0
| 0.740664
| 0
| 0.004149
| 0.106738
| 0.003113
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072614
| false
| 0.004149
| 0.010373
| 0
| 0.118257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
44fd4dcd8a9fe0f2a10c52831c8a746d6da78721
| 2,893
|
py
|
Python
|
trianglelib/utils.py
|
leimao/Sphinx-Python-TriangleLib
|
de1d9a606f619818226d2b62fa0b67e38d0b5243
|
[
"MIT"
] | 2
|
2020-08-03T11:10:59.000Z
|
2021-07-17T17:07:35.000Z
|
trianglelib/utils.py
|
leimao/Sphinx-Python-TriangleLib
|
de1d9a606f619818226d2b62fa0b67e38d0b5243
|
[
"MIT"
] | null | null | null |
trianglelib/utils.py
|
leimao/Sphinx-Python-TriangleLib
|
de1d9a606f619818226d2b62fa0b67e38d0b5243
|
[
"MIT"
] | null | null | null |
"""
Routines to test triangle properties without explicit instantiation.
"""
from trianglelib.shape import Triangle
def _make_triangle(a, b, c):
    """Build a Triangle from the side lengths, or None if they are invalid."""
    try:
        triangle = Triangle(a, b, c)
    except ValueError:
        return None
    return triangle
def is_triangle(a, b, c):
    """
    Return whether lengths `a`, `b`, `c` can form a valid triangle.

    :param a: side length one
    :type a: :class:`float`
    :param b: side length two
    :type b: :class:`float`
    :param c: side length three
    :type c: :class:`float`
    :return: whether lengths `a`, `b`, `c` can be the sides of a triangle
    :rtype: :class:`bool`
    """
    return _make_triangle(a, b, c) is not None
def is_equilateral(a, b, c):
    """
    Return whether lengths `a`, `b`, and `c` form an equilateral triangle.

    :param a: side length one
    :type a: :class:`float`
    :param b: side length two
    :type b: :class:`float`
    :param c: side length three
    :type c: :class:`float`
    :return: whether lengths `a`, `b`, and `c` are an equilateral triangle
    :rtype: :class:`bool`
    """
    candidate = _make_triangle(a, b, c)
    if candidate is None:
        return False
    return candidate.is_equilateral()
def is_isosceles(a, b, c):
    """
    Return whether lengths `a`, `b`, and `c` form an isosceles triangle.

    :param a: side length one
    :type a: :class:`float`
    :param b: side length two
    :type b: :class:`float`
    :param c: side length three
    :type c: :class:`float`
    :return: whether lengths `a`, `b`, and `c` are an isosceles triangle
    :rtype: :class:`bool`
    """
    candidate = _make_triangle(a, b, c)
    if candidate is None:
        return False
    return candidate.is_isosceles()
def compute_perimeter(a, b, c):
    """
    Return the perimeter of the triangle with side lengths `a`, `b`, and `c`.

    If the three lengths provided cannot be the sides of a triangle,
    then the perimeter 0 is returned.

    :param a: side length one
    :type a: :class:`float`
    :param b: side length two
    :type b: :class:`float`
    :param c: side length three
    :type c: :class:`float`
    :return: perimeter. If the three lengths provided cannot be the sides of a triangle, then the perimeter 0 is returned.
    :rtype: :class:`float`
    """
    triangle = _make_triangle(a, b, c)
    if triangle is None:
        return 0
    return triangle.perimeter()
def compute_area(a, b, c):
    """
    Return the area of the triangle with side lengths `a`, `b`, and `c`.

    If the three lengths provided cannot be the sides of a triangle,
    then the area 0 is returned.

    :param a: side length one
    :type a: :class:`float`
    :param b: side length two
    :type b: :class:`float`
    :param c: side length three
    :type c: :class:`float`
    :return: area. If the three lengths provided cannot be the sides of a triangle, then the area 0 is returned.
    :rtype: :class:`float`
    """
    # DOC FIX: the :return: line previously said "the perimeter 0 is
    # returned" -- a copy-paste from compute_perimeter; this function
    # returns the area.
    t = _make_triangle(a, b, c)
    return 0 if (t is None) else t.area()
| 29.520408
| 122
| 0.622191
| 452
| 2,893
| 3.940265
| 0.130531
| 0.022459
| 0.023582
| 0.050533
| 0.846154
| 0.819764
| 0.819764
| 0.819764
| 0.81808
| 0.81808
| 0
| 0.002778
| 0.25337
| 2,893
| 97
| 123
| 29.824742
| 0.821759
| 0.668165
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.047619
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
782e752e1ebc0e4fc5b1e1f49beb7ab4a20d6568
| 116
|
py
|
Python
|
codify/__init__.py
|
thorwhalen/codify
|
7a95e9d0acb0541f641d4a3163cbe388ce70c775
|
[
"MIT"
] | 1
|
2022-01-19T13:14:51.000Z
|
2022-01-19T13:14:51.000Z
|
codify/__init__.py
|
thorwhalen/codify
|
7a95e9d0acb0541f641d4a3163cbe388ce70c775
|
[
"MIT"
] | null | null | null |
codify/__init__.py
|
thorwhalen/codify
|
7a95e9d0acb0541f641d4a3163cbe388ce70c775
|
[
"MIT"
] | null | null | null |
from codify.qr_coding import qr_object, qrcode_img_of, qrcode_img_of_sha256
from codify.util import bytes_to_sha256
| 38.666667
| 75
| 0.87931
| 21
| 116
| 4.428571
| 0.619048
| 0.215054
| 0.236559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 0.086207
| 116
| 2
| 76
| 58
| 0.820755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7835789e231702961ad98d488208609543d0ac6c
| 2,804
|
py
|
Python
|
tests/test_run_order.py
|
pluto-py/engine
|
81e9973b194189382b75d24da39ee914dfa03c0b
|
[
"MIT"
] | 2
|
2020-09-08T16:49:55.000Z
|
2020-10-18T20:18:01.000Z
|
tests/test_run_order.py
|
malyvsen/Pluto.py
|
81e9973b194189382b75d24da39ee914dfa03c0b
|
[
"MIT"
] | null | null | null |
tests/test_run_order.py
|
malyvsen/Pluto.py
|
81e9973b194189382b75d24da39ee914dfa03c0b
|
[
"MIT"
] | null | null | null |
from pluto.notebook import Notebook
from pluto.run_order import RunOrder
from pluto.cell import Cell
from pluto.errors import NameConflictError, CycleError
class TestRunOrder:
    """Behavioral tests for RunOrder.from_notebook."""

    @staticmethod
    def _plan(*codes):
        """Build a Notebook from code snippets; return (notebook, run order)."""
        notebook = Notebook(cells=[Cell(code=snippet) for snippet in codes])
        return notebook, RunOrder.from_notebook(notebook)

    def test_independent(self):
        notebook, plan = self._plan('a = 0', 'b = 1')
        assert plan.order == notebook.cells
        assert plan.errors == {}

    def test_basic_ordered(self):
        notebook, plan = self._plan('a = 0', 'b = a + 1')
        assert plan.order == notebook.cells
        assert plan.errors == {}

    def test_basic_unordered(self):
        notebook, plan = self._plan('b = a + 0', 'a = 1')
        assert plan.order == list(reversed(notebook.cells))
        assert plan.errors == {}

    def test_name_conflict(self):
        notebook, plan = self._plan('a = 0', 'a = 1')
        assert plan.order == []
        assert plan.errors == {
            notebook.cells[0]: [NameConflictError('a')],
            notebook.cells[1]: [NameConflictError('a')]
        }

    def test_name_conflict_consequence(self):
        notebook, plan = self._plan('a = 0', 'a = 1\nb = 1', 'b * 2')
        assert plan.order == [notebook.cells[2]]
        assert plan.errors == {
            notebook.cells[0]: [NameConflictError('a')],
            notebook.cells[1]: [NameConflictError('a')]
        }

    def test_cycle(self):
        notebook, plan = self._plan('a = b - 0', 'b = a + 1')
        assert plan.order == []
        assert plan.errors == {
            notebook.cells[0]: [CycleError(notebook.cells)],
            notebook.cells[1]: [CycleError(notebook.cells)]
        }

    def test_cycle_consequence(self):
        notebook, plan = self._plan('a = b - 0', 'b = a + 1\nc = -1', 'c // 2')
        assert plan.order == [notebook.cells[2]]
        assert plan.errors == {
            notebook.cells[0]: [CycleError(notebook.cells[:2])],
            notebook.cells[1]: [CycleError(notebook.cells[:2])]
        }
| 31.155556
| 64
| 0.557418
| 311
| 2,804
| 4.890675
| 0.118971
| 0.205128
| 0.128863
| 0.115056
| 0.82643
| 0.82643
| 0.756082
| 0.756082
| 0.715319
| 0.715319
| 0
| 0.015464
| 0.308131
| 2,804
| 90
| 65
| 31.155556
| 0.768557
| 0
| 0
| 0.64
| 0
| 0
| 0.044207
| 0
| 0
| 0
| 0
| 0
| 0.186667
| 1
| 0.093333
| false
| 0
| 0.053333
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7894dd02bb19f3c4531a92f2dee8725c76d45ab8
| 11,822
|
py
|
Python
|
lino_book/projects/lydia/tests/dumps/18.12.0/ledger_voucher.py
|
lino-framework/lino_book
|
4eab916832cd8f48ff1b9fc8c2789f0b437da0f8
|
[
"BSD-2-Clause"
] | 3
|
2016-08-25T05:58:09.000Z
|
2019-12-05T11:13:45.000Z
|
lino_book/projects/lydia/tests/dumps/18.12.0/ledger_voucher.py
|
lino-framework/lino_book
|
4eab916832cd8f48ff1b9fc8c2789f0b437da0f8
|
[
"BSD-2-Clause"
] | 18
|
2016-11-12T21:38:58.000Z
|
2019-12-03T17:54:38.000Z
|
lino_book/projects/lydia/tests/dumps/18.12.0/ledger_voucher.py
|
lino-framework/lino_book
|
4eab916832cd8f48ff1b9fc8c2789f0b437da0f8
|
[
"BSD-2-Clause"
] | 9
|
2016-10-15T11:12:33.000Z
|
2021-09-22T04:37:37.000Z
|
# -*- coding: UTF-8 -*-
# Generated test-fixture dump: replays 132 ledger_voucher rows through the
# dumps loader. Do not hand-edit individual rows; regenerate instead.
logger.info("Loading 132 objects to table ledger_voucher...")
# fields: id, user, journal, entry_date, voucher_date, accounting_period, number, narration, state
loader.save(create_ledger_voucher(1,4,1,date(2015,1,7),date(2015,1,6),1,1,u'','20'))
loader.save(create_ledger_voucher(2,5,1,date(2015,1,8),date(2015,1,7),1,2,u'','20'))
loader.save(create_ledger_voucher(3,6,1,date(2015,1,9),date(2015,1,8),1,3,u'','20'))
loader.save(create_ledger_voucher(4,3,1,date(2015,1,10),date(2015,1,9),1,4,u'','20'))
loader.save(create_ledger_voucher(5,2,1,date(2015,1,11),date(2015,1,10),1,5,u'','20'))
loader.save(create_ledger_voucher(6,1,1,date(2015,2,7),date(2015,2,6),2,6,u'','20'))
loader.save(create_ledger_voucher(7,4,1,date(2015,2,8),date(2015,2,7),2,7,u'','20'))
loader.save(create_ledger_voucher(8,5,1,date(2015,2,9),date(2015,2,8),2,8,u'','20'))
loader.save(create_ledger_voucher(9,6,1,date(2015,2,10),date(2015,2,9),2,9,u'','20'))
loader.save(create_ledger_voucher(10,3,1,date(2015,3,7),date(2015,3,6),3,10,u'','20'))
loader.save(create_ledger_voucher(11,2,1,date(2015,4,7),date(2015,4,6),4,11,u'','20'))
loader.save(create_ledger_voucher(12,1,1,date(2015,4,8),date(2015,4,7),4,12,u'','20'))
loader.save(create_ledger_voucher(13,4,1,date(2015,4,9),date(2015,4,8),4,13,u'','20'))
loader.save(create_ledger_voucher(14,5,1,date(2015,4,10),date(2015,4,9),4,14,u'','20'))
loader.save(create_ledger_voucher(15,6,1,date(2015,4,11),date(2015,4,10),4,15,u'','20'))
loader.save(create_ledger_voucher(16,3,1,date(2015,4,12),date(2015,4,11),4,16,u'','20'))
loader.save(create_ledger_voucher(17,2,1,date(2015,4,13),date(2015,4,12),4,17,u'','20'))
loader.save(create_ledger_voucher(18,1,1,date(2015,4,14),date(2015,4,13),4,18,u'','20'))
loader.save(create_ledger_voucher(19,4,1,date(2015,5,7),date(2015,5,6),5,19,u'','20'))
loader.save(create_ledger_voucher(20,5,1,date(2015,5,8),date(2015,5,7),5,20,u'','20'))
loader.save(create_ledger_voucher(21,6,1,date(2015,5,9),date(2015,5,8),5,21,u'','20'))
loader.save(create_ledger_voucher(22,3,1,date(2015,5,10),date(2015,5,9),5,22,u'','20'))
loader.save(create_ledger_voucher(23,2,1,date(2015,5,11),date(2015,5,10),5,23,u'','20'))
loader.save(create_ledger_voucher(24,1,1,date(2015,5,12),date(2015,5,11),5,24,u'','20'))
loader.save(create_ledger_voucher(25,6,1,date(2015,1,1),date(2014,12,31),1,25,u'','20'))
loader.save(create_ledger_voucher(26,6,1,date(2015,1,1),date(2014,12,31),1,26,u'','20'))
loader.save(create_ledger_voucher(27,6,1,date(2015,1,1),date(2014,12,31),1,27,u'','20'))
loader.save(create_ledger_voucher(28,6,1,date(2015,1,1),date(2014,12,31),1,28,u'','20'))
loader.save(create_ledger_voucher(29,6,1,date(2015,1,1),date(2014,12,31),1,29,u'','20'))
loader.save(create_ledger_voucher(30,6,1,date(2015,1,1),date(2014,12,31),1,30,u'','20'))
loader.save(create_ledger_voucher(31,6,1,date(2015,1,1),date(2014,12,31),1,31,u'','20'))
loader.save(create_ledger_voucher(32,6,1,date(2015,1,1),date(2014,12,31),1,32,u'','20'))
loader.save(create_ledger_voucher(33,6,1,date(2015,1,1),date(2014,12,31),1,33,u'','20'))
loader.save(create_ledger_voucher(34,6,1,date(2015,1,1),date(2014,12,31),1,34,u'','20'))
loader.save(create_ledger_voucher(35,6,1,date(2015,1,1),date(2014,12,31),1,35,u'','20'))
loader.save(create_ledger_voucher(36,6,1,date(2015,1,1),date(2014,12,31),1,36,u'','20'))
loader.save(create_ledger_voucher(37,6,1,date(2015,1,1),date(2014,12,31),1,37,u'','20'))
loader.save(create_ledger_voucher(38,6,1,date(2015,1,1),date(2014,12,31),1,38,u'','20'))
loader.save(create_ledger_voucher(39,6,1,date(2015,1,1),date(2014,12,31),1,39,u'','20'))
loader.save(create_ledger_voucher(40,6,1,date(2015,1,1),date(2014,12,31),1,40,u'','20'))
loader.save(create_ledger_voucher(41,6,1,date(2015,1,1),date(2014,12,31),1,41,u'','20'))
loader.save(create_ledger_voucher(42,6,1,date(2015,1,1),date(2014,12,31),1,42,u'','20'))
loader.save(create_ledger_voucher(43,6,1,date(2015,1,1),date(2014,12,31),1,43,u'','20'))
loader.save(create_ledger_voucher(44,6,1,date(2015,1,1),date(2014,12,31),1,44,u'','20'))
loader.save(create_ledger_voucher(45,6,1,date(2015,1,1),date(2014,12,31),1,45,u'','20'))
loader.save(create_ledger_voucher(46,6,1,date(2015,1,1),date(2014,12,31),1,46,u'','20'))
loader.save(create_ledger_voucher(47,6,1,date(2015,1,1),date(2014,12,31),1,47,u'','20'))
loader.save(create_ledger_voucher(48,6,1,date(2015,2,1),date(2015,1,31),2,48,u'','20'))
loader.save(create_ledger_voucher(49,6,1,date(2015,2,1),date(2015,1,31),2,49,u'','20'))
loader.save(create_ledger_voucher(50,6,1,date(2015,2,1),date(2015,1,31),2,50,u'','20'))
loader.save(create_ledger_voucher(51,6,1,date(2015,2,1),date(2015,1,31),2,51,u'','20'))
loader.save(create_ledger_voucher(52,6,1,date(2015,2,1),date(2015,1,31),2,52,u'','20'))
loader.save(create_ledger_voucher(53,6,1,date(2015,2,1),date(2015,1,31),2,53,u'','20'))
loader.save(create_ledger_voucher(54,6,1,date(2015,2,1),date(2015,1,31),2,54,u'','20'))
loader.save(create_ledger_voucher(55,6,1,date(2015,2,1),date(2015,1,31),2,55,u'','20'))
loader.save(create_ledger_voucher(56,6,1,date(2015,2,1),date(2015,1,31),2,56,u'','20'))
loader.save(create_ledger_voucher(57,6,1,date(2015,2,1),date(2015,1,31),2,57,u'','20'))
loader.save(create_ledger_voucher(58,6,1,date(2015,2,1),date(2015,1,31),2,58,u'','20'))
loader.save(create_ledger_voucher(59,6,1,date(2015,2,1),date(2015,1,31),2,59,u'','20'))
loader.save(create_ledger_voucher(60,6,1,date(2015,2,1),date(2015,1,31),2,60,u'','20'))
loader.save(create_ledger_voucher(61,6,1,date(2015,2,1),date(2015,1,31),2,61,u'','20'))
loader.save(create_ledger_voucher(62,6,1,date(2015,2,1),date(2015,1,31),2,62,u'','20'))
loader.save(create_ledger_voucher(63,6,1,date(2015,2,1),date(2015,1,31),2,63,u'','20'))
loader.save(create_ledger_voucher(64,6,1,date(2015,2,1),date(2015,1,31),2,64,u'','20'))
loader.save(create_ledger_voucher(65,6,1,date(2015,2,1),date(2015,1,31),2,65,u'','20'))
loader.save(create_ledger_voucher(66,6,1,date(2015,2,1),date(2015,1,31),2,66,u'','20'))
loader.save(create_ledger_voucher(67,6,1,date(2015,2,1),date(2015,1,31),2,67,u'','20'))
loader.save(create_ledger_voucher(68,6,1,date(2015,2,1),date(2015,1,31),2,68,u'','20'))
loader.save(create_ledger_voucher(69,6,1,date(2015,2,1),date(2015,1,31),2,69,u'','20'))
loader.save(create_ledger_voucher(70,6,1,date(2015,2,1),date(2015,1,31),2,70,u'','20'))
loader.save(create_ledger_voucher(71,6,1,date(2015,3,1),date(2015,2,28),3,71,u'','20'))
loader.save(create_ledger_voucher(72,6,1,date(2015,3,1),date(2015,2,28),3,72,u'','20'))
loader.save(create_ledger_voucher(73,6,1,date(2015,3,1),date(2015,2,28),3,73,u'','20'))
loader.save(create_ledger_voucher(74,6,1,date(2015,3,1),date(2015,2,28),3,74,u'','20'))
loader.save(create_ledger_voucher(75,6,1,date(2015,3,1),date(2015,2,28),3,75,u'','20'))
loader.save(create_ledger_voucher(76,6,1,date(2015,3,1),date(2015,2,28),3,76,u'','20'))
loader.save(create_ledger_voucher(77,6,1,date(2015,3,1),date(2015,2,28),3,77,u'','20'))
loader.save(create_ledger_voucher(78,6,1,date(2015,3,1),date(2015,2,28),3,78,u'','20'))
loader.save(create_ledger_voucher(79,6,1,date(2015,3,1),date(2015,2,28),3,79,u'','20'))
loader.save(create_ledger_voucher(80,6,1,date(2015,3,1),date(2015,2,28),3,80,u'','20'))
loader.save(create_ledger_voucher(81,6,1,date(2015,3,1),date(2015,2,28),3,81,u'','20'))
loader.save(create_ledger_voucher(82,6,1,date(2015,3,1),date(2015,2,28),3,82,u'','20'))
loader.save(create_ledger_voucher(83,6,1,date(2015,3,1),date(2015,2,28),3,83,u'','20'))
loader.save(create_ledger_voucher(84,6,1,date(2015,3,1),date(2015,2,28),3,84,u'','20'))
loader.save(create_ledger_voucher(85,6,1,date(2015,3,1),date(2015,2,28),3,85,u'','20'))
loader.save(create_ledger_voucher(86,6,1,date(2015,3,1),date(2015,2,28),3,86,u'','20'))
loader.save(create_ledger_voucher(87,4,3,date(2015,1,3),date(2015,1,2),1,1,u'','20'))
loader.save(create_ledger_voucher(88,5,3,date(2015,1,4),date(2015,1,3),1,2,u'','20'))
loader.save(create_ledger_voucher(89,6,3,date(2015,1,5),date(2015,1,4),1,3,u'','20'))
loader.save(create_ledger_voucher(90,3,3,date(2015,1,6),date(2015,1,5),1,4,u'','20'))
loader.save(create_ledger_voucher(91,2,3,date(2015,1,7),date(2015,1,6),1,5,u'','20'))
loader.save(create_ledger_voucher(92,1,3,date(2015,1,8),date(2015,1,7),1,6,u'','20'))
loader.save(create_ledger_voucher(93,4,3,date(2015,1,9),date(2015,1,8),1,7,u'','20'))
loader.save(create_ledger_voucher(94,5,3,date(2015,2,3),date(2015,2,2),2,8,u'','20'))
loader.save(create_ledger_voucher(95,6,3,date(2015,2,4),date(2015,2,3),2,9,u'','20'))
loader.save(create_ledger_voucher(96,3,3,date(2015,2,5),date(2015,2,4),2,10,u'','20'))
loader.save(create_ledger_voucher(97,2,3,date(2015,2,6),date(2015,2,5),2,11,u'','20'))
loader.save(create_ledger_voucher(98,1,3,date(2015,2,7),date(2015,2,6),2,12,u'','20'))
loader.save(create_ledger_voucher(99,4,3,date(2015,2,8),date(2015,2,7),2,13,u'','20'))
loader.save(create_ledger_voucher(100,5,3,date(2015,2,9),date(2015,2,8),2,14,u'','20'))
loader.save(create_ledger_voucher(101,6,3,date(2015,3,3),date(2015,3,2),3,15,u'','20'))
loader.save(create_ledger_voucher(102,3,3,date(2015,3,4),date(2015,3,3),3,16,u'','20'))
loader.save(create_ledger_voucher(103,2,3,date(2015,3,5),date(2015,3,4),3,17,u'','20'))
loader.save(create_ledger_voucher(104,1,3,date(2015,3,6),date(2015,3,5),3,18,u'','20'))
loader.save(create_ledger_voucher(105,4,3,date(2015,3,7),date(2015,3,6),3,19,u'','20'))
loader.save(create_ledger_voucher(106,5,3,date(2015,3,8),date(2015,3,7),3,20,u'','20'))
loader.save(create_ledger_voucher(107,6,3,date(2015,3,9),date(2015,3,8),3,21,u'','20'))
loader.save(create_ledger_voucher(108,3,3,date(2015,4,3),date(2015,4,2),4,22,u'','20'))
loader.save(create_ledger_voucher(109,2,3,date(2015,4,4),date(2015,4,3),4,23,u'','20'))
loader.save(create_ledger_voucher(110,1,3,date(2015,4,5),date(2015,4,4),4,24,u'','20'))
loader.save(create_ledger_voucher(111,4,3,date(2015,4,6),date(2015,4,5),4,25,u'','20'))
loader.save(create_ledger_voucher(112,5,3,date(2015,4,7),date(2015,4,6),4,26,u'','20'))
loader.save(create_ledger_voucher(113,6,3,date(2015,4,8),date(2015,4,7),4,27,u'','20'))
loader.save(create_ledger_voucher(114,3,3,date(2015,4,9),date(2015,4,8),4,28,u'','20'))
loader.save(create_ledger_voucher(115,2,3,date(2015,5,3),date(2015,5,2),5,29,u'','20'))
loader.save(create_ledger_voucher(116,1,3,date(2015,5,4),date(2015,5,3),5,30,u'','20'))
loader.save(create_ledger_voucher(117,4,3,date(2015,5,5),date(2015,5,4),5,31,u'','20'))
loader.save(create_ledger_voucher(118,5,3,date(2015,5,6),date(2015,5,5),5,32,u'','20'))
loader.save(create_ledger_voucher(119,6,3,date(2015,5,7),date(2015,5,6),5,33,u'','20'))
loader.save(create_ledger_voucher(120,3,3,date(2015,5,8),date(2015,5,7),5,34,u'','20'))
loader.save(create_ledger_voucher(121,2,3,date(2015,5,9),date(2015,5,8),5,35,u'','20'))
loader.save(create_ledger_voucher(122,4,8,date(2015,1,31),date(2015,1,31),1,1,u'','20'))
loader.save(create_ledger_voucher(123,5,8,date(2015,2,28),date(2015,2,28),2,2,u'','20'))
loader.save(create_ledger_voucher(124,6,8,date(2015,3,28),date(2015,3,28),3,3,u'','20'))
loader.save(create_ledger_voucher(125,4,4,date(2015,1,13),date(2015,1,13),1,1,u'','20'))
loader.save(create_ledger_voucher(126,5,4,date(2015,2,13),date(2015,2,13),2,2,u'','20'))
loader.save(create_ledger_voucher(127,6,4,date(2015,3,13),date(2015,3,13),3,3,u'','20'))
loader.save(create_ledger_voucher(128,3,4,date(2015,4,13),date(2015,4,13),4,4,u'','20'))
loader.save(create_ledger_voucher(129,2,6,date(2015,1,21),date(2015,1,21),1,1,u'','20'))
loader.save(create_ledger_voucher(130,1,6,date(2015,2,21),date(2015,2,21),2,2,u'','20'))
loader.save(create_ledger_voucher(131,4,6,date(2015,3,21),date(2015,3,21),3,3,u'','20'))
loader.save(create_ledger_voucher(132,5,6,date(2015,4,21),date(2015,4,21),4,4,u'','20'))
# Flush any objects the loader deferred because of FK ordering.
loader.flush_deferred_objects()
| 85.666667
| 98
| 0.705126
| 2,669
| 11,822
| 3.022106
| 0.060322
| 0.239028
| 0.26184
| 0.36003
| 0.831267
| 0.798289
| 0.798289
| 0.590999
| 0.351351
| 0.18733
| 0
| 0.248243
| 0.013196
| 11,822
| 137
| 99
| 86.291971
| 0.443168
| 0.009981
| 0
| 0
| 0
| 0
| 0.026493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
152a88a1825453ea748e41d3f18227011e093066
| 40,277
|
py
|
Python
|
lang/python/github/com/metaprov/modelaapi/services/model/v1/model_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 5
|
2022-02-18T03:40:10.000Z
|
2022-03-01T16:11:24.000Z
|
lang/python/github/com/metaprov/modelaapi/services/model/v1/model_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-01-07T19:59:25.000Z
|
2022-02-04T01:21:14.000Z
|
lang/python/github/com/metaprov/modelaapi/services/model/v1/model_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-03-25T10:21:43.000Z
|
2022-03-25T10:21:43.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from github.com.metaprov.modelaapi.services.model.v1 import model_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_model_dot_v1_dot_model__pb2
class ModelServiceStub(object):
    """Client stub for the ModelService gRPC service.

    Exposes one unary-unary callable attribute per RPC (ListModels,
    CreateModel, ...), each wired to the matching protobuf request
    serializer and response deserializer.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        pb2 = github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_model_dot_v1_dot_model__pb2
        route_prefix = '/github.com.metaprov.modelaapi.services.model.v1.ModelService/'
        # (rpc name, request message name, response message name).
        for rpc, req_name, resp_name in (
            ('ListModels', 'ListModelsRequest', 'ListModelsResponse'),
            ('CreateModel', 'CreateModelRequest', 'CreateModelResponse'),
            ('GetModel', 'GetModelRequest', 'GetModelResponse'),
            ('UpdateModel', 'UpdateModelRequest', 'UpdateModelResponse'),
            ('DeleteModel', 'DeleteModelRequest', 'DeleteModelResponse'),
            ('DeployModel', 'DeployModelRequest', 'DeployModelResponse'),
            ('PublishModel', 'PublishModelRequest', 'PublishModelResponse'),
            ('CreateModelProfile', 'CreateModelProfileRequest', 'CreateModelProfileResponse'),
            ('GetModelProfile', 'GetModelProfileRequest', 'GetModelProfileResponse'),
            # Irregular message names: the RPC is GetModelMisclass but the
            # proto messages are GetMisclass{Request,Response}.
            ('GetModelMisclass', 'GetMisclassRequest', 'GetMisclassResponse'),
            ('GetModelLogs', 'GetModelLogsRequest', 'GetModelLogsResponse'),
            ('AbortModel', 'AbortModelRequest', 'AbortModelResponse'),
            ('PauseModel', 'PauseModelRequest', 'PauseModelResponse'),
            ('ResumeModel', 'ResumeModelRequest', 'ResumeModelResponse'),
            ('CompareModels', 'CompareModelsRequest', 'CompareModelsResponse'),
            ('CompileModel', 'CompileModelRequest', 'CompileModelResponse'),
            ('DownloadModel', 'DownloadModelRequest', 'DownloadModelResponse'),
            ('FlagModel', 'FlagModelRequest', 'FlagModelResponse'),
            ('TestModel', 'TestModelRequest', 'TestModelResponse'),
        ):
            setattr(self, rpc, channel.unary_unary(
                route_prefix + rpc,
                request_serializer=getattr(pb2, req_name).SerializeToString,
                response_deserializer=getattr(pb2, resp_name).FromString,
            ))
class ModelServiceServicer(object):
    """Server-side handler interface for ModelService.

    Every RPC handler is an UNIMPLEMENTED stub; real servers subclass and
    override the methods they support.
    """

    @staticmethod
    def _unimplemented(context):
        # Shared stub behavior: flag the RPC context as UNIMPLEMENTED,
        # attach the detail message, then raise locally as well.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListModels(self, request, context):
        """Handle the ListModels RPC (unimplemented stub)."""
        self._unimplemented(context)

    def CreateModel(self, request, context):
        """Handle the CreateModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def GetModel(self, request, context):
        """Handle the GetModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def UpdateModel(self, request, context):
        """Handle the UpdateModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def DeleteModel(self, request, context):
        """Handle the DeleteModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def DeployModel(self, request, context):
        """Handle the DeployModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def PublishModel(self, request, context):
        """Handle the PublishModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def CreateModelProfile(self, request, context):
        """Handle the CreateModelProfile RPC (unimplemented stub)."""
        self._unimplemented(context)

    def GetModelProfile(self, request, context):
        """Handle the GetModelProfile RPC (unimplemented stub)."""
        self._unimplemented(context)

    def GetModelMisclass(self, request, context):
        """Handle the GetModelMisclass RPC (unimplemented stub)."""
        self._unimplemented(context)

    def GetModelLogs(self, request, context):
        """Handle the GetModelLogs RPC (unimplemented stub)."""
        self._unimplemented(context)

    def AbortModel(self, request, context):
        """Handle the AbortModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def PauseModel(self, request, context):
        """Handle the PauseModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def ResumeModel(self, request, context):
        """Handle the ResumeModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def CompareModels(self, request, context):
        """Handle the CompareModels RPC (unimplemented stub)."""
        self._unimplemented(context)

    def CompileModel(self, request, context):
        """Handle the CompileModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def DownloadModel(self, request, context):
        """Handle the DownloadModel RPC (unimplemented stub)."""
        self._unimplemented(context)

    def FlagModel(self, request, context):
        """Flag model (unimplemented stub)."""
        self._unimplemented(context)

    def TestModel(self, request, context):
        """Mark the model to test (unimplemented stub)."""
        self._unimplemented(context)
def add_ModelServiceServicer_to_server(servicer, server):
    """Register every ModelService RPC handler from *servicer* on *server*.

    Builds one unary-unary method handler per RPC (wired to the matching
    protobuf deserializer/serializer) and installs them as a generic handler
    under the fully-qualified service name.
    """
    pb2 = github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_model_dot_v1_dot_model__pb2
    rpc_method_handlers = {}
    # (rpc name, request message name, response message name).
    for rpc, req_name, resp_name in (
        ('ListModels', 'ListModelsRequest', 'ListModelsResponse'),
        ('CreateModel', 'CreateModelRequest', 'CreateModelResponse'),
        ('GetModel', 'GetModelRequest', 'GetModelResponse'),
        ('UpdateModel', 'UpdateModelRequest', 'UpdateModelResponse'),
        ('DeleteModel', 'DeleteModelRequest', 'DeleteModelResponse'),
        ('DeployModel', 'DeployModelRequest', 'DeployModelResponse'),
        ('PublishModel', 'PublishModelRequest', 'PublishModelResponse'),
        ('CreateModelProfile', 'CreateModelProfileRequest', 'CreateModelProfileResponse'),
        ('GetModelProfile', 'GetModelProfileRequest', 'GetModelProfileResponse'),
        # Irregular proto message names for this RPC.
        ('GetModelMisclass', 'GetMisclassRequest', 'GetMisclassResponse'),
        ('GetModelLogs', 'GetModelLogsRequest', 'GetModelLogsResponse'),
        ('AbortModel', 'AbortModelRequest', 'AbortModelResponse'),
        ('PauseModel', 'PauseModelRequest', 'PauseModelResponse'),
        ('ResumeModel', 'ResumeModelRequest', 'ResumeModelResponse'),
        ('CompareModels', 'CompareModelsRequest', 'CompareModelsResponse'),
        ('CompileModel', 'CompileModelRequest', 'CompileModelResponse'),
        ('DownloadModel', 'DownloadModelRequest', 'DownloadModelResponse'),
        ('FlagModel', 'FlagModelRequest', 'FlagModelResponse'),
        ('TestModel', 'TestModelRequest', 'TestModelResponse'),
    ):
        rpc_method_handlers[rpc] = grpc.unary_unary_rpc_method_handler(
            getattr(servicer, rpc),
            request_deserializer=getattr(pb2, req_name).FromString,
            response_serializer=getattr(pb2, resp_name).SerializeToString,
        )
    generic_handler = grpc.method_handlers_generic_handler(
        'github.com.metaprov.modelaapi.services.model.v1.ModelService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ModelService(object):
    """One-shot client helpers for ModelService.

    Each RPC is exposed as a staticmethod that issues a single
    grpc.experimental.unary_unary call; the methods are attached below by
    the registration loop so serializer wiring stays table-driven.
    """


def _model_service_call(rpc, request_cls, response_cls):
    # Factory: build a staticmethod performing a one-shot unary-unary call
    # for *rpc* with the given protobuf request/response classes.
    route = '/github.com.metaprov.modelaapi.services.model.v1.ModelService/' + rpc

    def call(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, route,
            request_cls.SerializeToString,
            response_cls.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    call.__name__ = rpc
    call.__qualname__ = 'ModelService.' + rpc
    call.__doc__ = 'Issue a one-shot %s RPC against *target*.' % rpc
    return staticmethod(call)


# Attach one staticmethod per RPC. (rpc name, request message, response message);
# note GetModelMisclass uses the irregular GetMisclass{Request,Response} names.
for _rpc, _req_name, _resp_name in (
    ('ListModels', 'ListModelsRequest', 'ListModelsResponse'),
    ('CreateModel', 'CreateModelRequest', 'CreateModelResponse'),
    ('GetModel', 'GetModelRequest', 'GetModelResponse'),
    ('UpdateModel', 'UpdateModelRequest', 'UpdateModelResponse'),
    ('DeleteModel', 'DeleteModelRequest', 'DeleteModelResponse'),
    ('DeployModel', 'DeployModelRequest', 'DeployModelResponse'),
    ('PublishModel', 'PublishModelRequest', 'PublishModelResponse'),
    ('CreateModelProfile', 'CreateModelProfileRequest', 'CreateModelProfileResponse'),
    ('GetModelProfile', 'GetModelProfileRequest', 'GetModelProfileResponse'),
    ('GetModelMisclass', 'GetMisclassRequest', 'GetMisclassResponse'),
    ('GetModelLogs', 'GetModelLogsRequest', 'GetModelLogsResponse'),
    ('AbortModel', 'AbortModelRequest', 'AbortModelResponse'),
    ('PauseModel', 'PauseModelRequest', 'PauseModelResponse'),
    ('ResumeModel', 'ResumeModelRequest', 'ResumeModelResponse'),
    ('CompareModels', 'CompareModelsRequest', 'CompareModelsResponse'),
    ('CompileModel', 'CompileModelRequest', 'CompileModelResponse'),
    ('DownloadModel', 'DownloadModelRequest', 'DownloadModelResponse'),
    ('FlagModel', 'FlagModelRequest', 'FlagModelResponse'),
    ('TestModel', 'TestModelRequest', 'TestModelResponse'),
):
    _pb2 = github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_model_dot_v1_dot_model__pb2
    setattr(ModelService, _rpc,
            _model_service_call(_rpc, getattr(_pb2, _req_name), getattr(_pb2, _resp_name)))
del _rpc, _req_name, _resp_name, _pb2
| 60.749623
| 172
| 0.734067
| 4,306
| 40,277
| 6.346029
| 0.035996
| 0.067335
| 0.050501
| 0.063127
| 0.925199
| 0.925199
| 0.925199
| 0.897277
| 0.893362
| 0.872942
| 0
| 0.008461
| 0.204732
| 40,277
| 662
| 173
| 60.84139
| 0.84465
| 0.03533
| 0
| 0.522337
| 1
| 0
| 0.102204
| 0.073888
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068729
| false
| 0
| 0.003436
| 0.032646
| 0.109966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1595e7f69f89cad593cabd4d8b56f829193bbd8e
| 151
|
py
|
Python
|
clge/project_creator_files/sounds.py
|
RafayelGardishyan/CLGE
|
12afb3612963c7631406c3693cdcff4442379c1c
|
[
"MIT"
] | 2
|
2018-02-20T06:13:44.000Z
|
2019-10-31T21:55:00.000Z
|
clge/project_creator_files/sounds.py
|
RafayelGardishyan/CLGE
|
12afb3612963c7631406c3693cdcff4442379c1c
|
[
"MIT"
] | 6
|
2018-02-03T12:33:35.000Z
|
2018-09-10T17:27:57.000Z
|
clge/project_creator_files/sounds.py
|
RafayelGardishyan/CLGE
|
12afb3612963c7631406c3693cdcff4442379c1c
|
[
"MIT"
] | 1
|
2018-02-13T14:02:28.000Z
|
2018-02-13T14:02:28.000Z
|
from settings import SOUND_FOLDER_PREFIX as sfp
from clge import AudioPlayer
def get_sounds():
return {"sound1": AudioPlayer(sfp + "sound1.wav")}
| 25.166667
| 54
| 0.761589
| 21
| 151
| 5.333333
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015504
| 0.145695
| 151
| 5
| 55
| 30.2
| 0.852713
| 0
| 0
| 0
| 0
| 0
| 0.10596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
15b88fd3127ec5d507d43f469f861215906edce3
| 6,327
|
py
|
Python
|
studybuddyfinder/migrations/0025_auto_20201112_1840.py
|
SindhuMente/CS3240-StudyBuddyFinder
|
c3c2f2b80b8351df9255e44194bce6503f984183
|
[
"MIT"
] | 2
|
2020-12-10T02:39:00.000Z
|
2021-03-16T23:32:46.000Z
|
studybuddyfinder/migrations/0025_auto_20201112_1840.py
|
SindhuMente/CS3240-StudyBuddyFinder
|
c3c2f2b80b8351df9255e44194bce6503f984183
|
[
"MIT"
] | null | null | null |
studybuddyfinder/migrations/0025_auto_20201112_1840.py
|
SindhuMente/CS3240-StudyBuddyFinder
|
c3c2f2b80b8351df9255e44194bce6503f984183
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-11-12 23:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studybuddyfinder', '0024_auto_20201112_0023'),
]
operations = [
migrations.RenameField(
model_name='calendar',
old_name='fri',
new_name='frifts',
),
migrations.RemoveField(
model_name='calendar',
name='mon',
),
migrations.RemoveField(
model_name='calendar',
name='sat',
),
migrations.RemoveField(
model_name='calendar',
name='sun',
),
migrations.RemoveField(
model_name='calendar',
name='thurs',
),
migrations.RemoveField(
model_name='calendar',
name='tues',
),
migrations.RemoveField(
model_name='calendar',
name='wed',
),
migrations.AddField(
model_name='calendar',
name='friste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='frittf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='frittt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='fritttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='monfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='monste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='monttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='monttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='montttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='satfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='satste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='satttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='satttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='sattttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='sunfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='sunste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='sunttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='sunttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='suntttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='thursfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='thursste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='thursttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='thursttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='thurstttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='tuesfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='tuesste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='tuesttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='tuesttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='tuestttwo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='wedfts',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='wedste',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='wedttf',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='wedttt',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='calendar',
name='wedtttwo',
field=models.BooleanField(default=False),
),
]
| 29.704225
| 56
| 0.519678
| 490
| 6,327
| 6.616327
| 0.161224
| 0.113819
| 0.214991
| 0.259099
| 0.853794
| 0.842998
| 0.753239
| 0.753239
| 0.753239
| 0.753239
| 0
| 0.007723
| 0.365576
| 6,327
| 212
| 57
| 29.84434
| 0.79995
| 0.007112
| 0
| 0.757282
| 1
| 0
| 0.100318
| 0.003662
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004854
| 0
| 0.019417
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ec5cdaa5f765e746b19371eaba50b7d646a5efb6
| 45
|
py
|
Python
|
WeatherPy/api_keys.py
|
shujams/python-api-challenge
|
193aa5c6fd0fb23be7431d6fe5f9afe654693de5
|
[
"MIT"
] | null | null | null |
WeatherPy/api_keys.py
|
shujams/python-api-challenge
|
193aa5c6fd0fb23be7431d6fe5f9afe654693de5
|
[
"MIT"
] | null | null | null |
WeatherPy/api_keys.py
|
shujams/python-api-challenge
|
193aa5c6fd0fb23be7431d6fe5f9afe654693de5
|
[
"MIT"
] | null | null | null |
api_key = "99f4aaf8e92e82c523a3af11a8829066"
| 22.5
| 44
| 0.866667
| 3
| 45
| 12.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.066667
| 45
| 1
| 45
| 45
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0.711111
| 0.711111
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
01d326571f2703129bfc0f61cbdc0393e1594594
| 37,068
|
py
|
Python
|
scripts/create_figures.py
|
duguyue100/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 12
|
2016-05-12T09:58:19.000Z
|
2021-04-10T02:46:21.000Z
|
scripts/create_figures.py
|
colinshane/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 1
|
2019-07-08T03:50:02.000Z
|
2019-07-09T07:22:18.000Z
|
scripts/create_figures.py
|
colinshane/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 10
|
2016-04-09T01:58:22.000Z
|
2020-06-07T05:13:46.000Z
|
"""Create figures for visualization purposes.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
import os
from os.path import join
import h5py
import cPickle as pickle
import numpy as np
from moviepy.editor import ImageSequenceClip
import cv2
import matplotlib
import matplotlib.pylab as plt
from spikefuel import dvsproc, gui, tools, helpers
# matplotlib.rcParams.update({'font.size': 100})
# options:
# "vot", "tracking", "ucf50", "caltech256"
# "caltech256-identity-wrong-files"
# "vot-ps", "tracking-ps", "ucf50-ps", "caltech256-ps"
# "event-frequency"
# "mnist-dvs", "mnist-dvs-ps", "nmnist", "ncaltech101", "ncaltech101-ps"
# "white-test"
# "vot-dvs-figure" "vot-figure" "tracking-dvs-figure" "tracking-figure"
# "ucf50-figure", "ucf50-dvs-figure"
# "gui-show"
option = "y-time-figure"
data_path = os.environ["SPIKEFUEL_DATA"]
stats_path = os.path.join(data_path, "sf_data")
if option == "ucf50-dvs-figure":
ucf50_fn = "INI_UCF50_30fps_20160424.hdf5"
ucf50_path = join(data_path, ucf50_fn)
ucf50_db = h5py.File(ucf50_path, mode="r")
ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
vid_num = 10
f = file(ucf50_stats_path, mode="r")
ucf50_stats = pickle.load(f)
f.close()
ucf50_list = ucf50_stats["ucf50_list"]
cn = "RopeClimbing"
vid_name = ucf50_stats[cn][vid_num-1]
vid_n, vid_ex = os.path.splitext(vid_name)
seq_save_path = os.path.join(data_path, "all_imgs", "ucf50_dvs_figs")
num_frames = int(ucf50_db[cn][vid_n].attrs["num_frames"])
timestamps = ucf50_db[cn][vid_n]["timestamps"][()]
x_pos = ucf50_db[cn][vid_n]["x_pos"][()]
y_pos = ucf50_db[cn][vid_n]["y_pos"][()]
pol = ucf50_db[cn][vid_n]["pol"][()]
(timestamps, x_pos,
y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
y_pos, pol, window=1000)
frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
pol, num_frames, fs=3)
new_frames = []
for frame in frames:
tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
new_frames.append(tmp_frame)
for i in xrange(len(new_frames)):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, new_frames[i])
print "Sequence %s is saved at %s" % (vid_name, seq_save_path)
if option == "ucf50-figure":
ucf50_path = join(data_path, "UCF50", "UCF50")
ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
vid_num = 10
f = file(ucf50_stats_path, mode="r")
ucf50_stats = pickle.load(f)
f.close()
ucf50_list = ucf50_stats["ucf50_list"]
cn = "RopeClimbing"
seq_save_path = os.path.join(data_path, "all_imgs", "ucf50_figs")
vid_name = ucf50_stats[cn][vid_num-1]
frames, num_frames = helpers.read_video(join(ucf50_path, cn, vid_name))
for i in xrange(num_frames):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, frames[i])
print "Sequence %s is saved at %s" % (vid_name, seq_save_path)
if option == "tracking-figure":
tracking_path = os.path.join(data_path, "TrackingDataset")
tracking_stats_path = os.path.join(stats_path, "tracking_stats.pkl")
f = file(tracking_stats_path, mode="r")
tracking_stats = pickle.load(f)
f.close()
pl = tracking_stats["primary_list"]
sl = tracking_stats["secondary_list"]
pc = pl[6]
sc = sl[pc][3]
print sc
seq_save_path = os.path.join(data_path, "all_imgs", "tracking_figs")
frames = []
for img_name in tracking_stats[sc]:
img_path = join(tracking_path, pc, sc, img_name)
frames.append(cv2.imread(img_path))
gt_path = os.path.join(tracking_path, pc, sc, "groundtruth.txt")
gt = np.loadtxt(gt_path, dtype=np.float32, delimiter=",")
gt = helpers.trans_groundtruth(gt, method="size")
gt = np.reshape(gt, (gt.shape[0], 4, 2))
print "[MESSAGE] Images are loaded"
new_frames = gui.draw_poly_box_sequence(frames, gt)
new_frames = gui.rescale_image_sequence(new_frames, 270, 360, [0, 0, 0])
for i in xrange(len(new_frames)):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, new_frames[i])
print "Sequence %s is saved at %s" % (sc, seq_save_path)
if option == "tracking-dvs-figure":
tracking_fn = "INI_TrackingDataset_30fps_20160424.hdf5"
tracking_path = os.path.join(data_path, tracking_fn)
tracking_db = h5py.File(tracking_path, mode="r")
tracking_stats_path = os.path.join(stats_path, "tracking_stats.pkl")
f = file(tracking_stats_path, mode="r")
tracking_stats = pickle.load(f)
f.close()
pl = tracking_stats["primary_list"]
sl = tracking_stats["secondary_list"]
pc = pl[1]
sc = sl[pc][7]
print sc
seq_save_path = os.path.join(data_path, "all_imgs", "tracking_dvs_figs")
num_frames = int(tracking_db[pc][sc].attrs["num_frames"])
timestamps = tracking_db[pc][sc]["timestamps"][()]
x_pos = tracking_db[pc][sc]["x_pos"][()]
y_pos = tracking_db[pc][sc]["y_pos"][()]
pol = tracking_db[pc][sc]["pol"][()]
bounding_box = tracking_db[pc][sc]["bounding_box"][()]
gt = bounding_box[:, 1:]
gt = np.reshape(gt, (gt.shape[0], 4, 2))
(timestamps, x_pos,
y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
y_pos, pol, window=1000)
frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos, pol,
num_frames, fs=3)
new_frames = []
for frame in frames:
tmp_frame = (((frame+fs) / float(2*fs))*255).astype(np.uint8)
new_frames.append(tmp_frame)
rgb_frames = []
height = new_frames[0].shape[0]
width = new_frames[0].shape[1]
for frame in new_frames:
temp_frame = np.zeros((height, width, 3))
temp_frame[:, :, 0] = frame
temp_frame[:, :, 1] = frame
temp_frame[:, :, 2] = frame
rgb_frames.append(temp_frame)
new_frames = gui.draw_poly_box_sequence(rgb_frames, gt, color=[0, 0, 255])
for i in xrange(len(new_frames)):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, new_frames[i])
print "Sequence %s is saved at %s" % (sc, seq_save_path)
if option == "vot-figure":
vot_path = os.path.join(data_path, "vot2015")
vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
# load vot stats
f = file(vot_stats_path, mode="r")
vot_stats = pickle.load(f)
f.close()
vot_list = vot_stats['vot_list']
num_frames = vot_stats['num_frames']
no_seq = 0
vidseq = vot_list[no_seq]
seq_save_path = join(data_path, "all_imgs", "vot_figs")
list_path = join(vot_path, vidseq)
img_list = tools.create_vot_image_list(list_path, num_frames[no_seq])
gts = np.loadtxt(join(list_path, "groundtruth.txt"),
dtype=np.float32, delimiter=",")
gts = np.reshape(gts, (gts.shape[0], 4, 2))
print "[MESSAGE] Ground truths and image lists are loaded."
frames = []
for img_name in img_list:
frames.append(cv2.imread(img_name))
print "[MESSAGE] Images are loaded"
new_frames = gui.draw_poly_box_sequence(frames, gts)
for i in xrange(len(new_frames)):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, new_frames[i])
print "Sequence %s is saved at %s" % (vidseq, seq_save_path)
if option == "vot-dvs-figure":
vot_fn = "INI_VOT_30fps_20160424.hdf5"
vot_path = os.path.join(data_path, vot_fn)
vot_db = h5py.File(vot_path, mode="r")
vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
# load vot stats
f = file(vot_stats_path, mode="r")
vot_stats = pickle.load(f)
f.close()
vot_list = vot_stats['vot_list']
num_frames = vot_stats['num_frames']
vidseq = vot_list[1]
seq_save_path = join(data_path, "all_imgs", "vot_dvs_figs")
num_frames = int(vot_db[vidseq].attrs["num_frames"])
timestamps = vot_db[vidseq]["timestamps"][()]
x_pos = vot_db[vidseq]["x_pos"][()]
y_pos = vot_db[vidseq]["y_pos"][()]
pol = vot_db[vidseq]["pol"][()]
bounding_box = vot_db[vidseq]["bounding_box"][()]
gt = bounding_box[:, 1:]
gt = np.reshape(gt, (gt.shape[0], 4, 2))
(timestamps, x_pos,
y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
y_pos, pol, window=1000)
frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
pol, num_frames, fs=3)
new_frames = []
for frame in frames:
tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
new_frames.append(tmp_frame)
rgb_frames = []
height = new_frames[0].shape[0]
width = new_frames[0].shape[1]
for frame in new_frames:
temp_frame = np.zeros((height, width, 3))
temp_frame[:, :, 0] = frame
temp_frame[:, :, 1] = frame
temp_frame[:, :, 2] = frame
rgb_frames.append(temp_frame)
new_frames = gui.draw_poly_box_sequence(rgb_frames, gt,
color=[0, 0, 255])
for i in xrange(len(new_frames)):
img_name = join(seq_save_path, "%08d" % (i+1,)+".png")
cv2.imwrite(img_name, new_frames[i])
print "Sequence %s is saved at %s" % (vidseq, seq_save_path)
if option == "white-test":
test_path = os.path.join(data_path, "test.aedat")
(timestamps, xaddr, yaddr, pol) = dvsproc.loadaerdat(test_path)
event_arr = dvsproc.cal_event_count(timestamps)
event_freq = dvsproc.cal_event_freq(event_arr, window=1000)
plt.figure(figsize=(18, 8))
plt.plot(event_freq[:, 0]/1e3, event_freq[:, 1], linewidth=2)
plt.xlabel("Time (s)")
plt.ylabel("Event Frequency")
plt.savefig(os.path.join(data_path, "event_freq.pdf"))
timestamps = timestamps-timestamps[0]
timestamps = timestamps[:10000]
tend = timestamps[-1]
vv = np.zeros((tend+1,))
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(24, 10))
# plt.ylim([0, 3e-3])
plt.xlim([0, 100])
plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=2)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 10))
plt.xlabel("Frequency [Hz]")
plt.ylabel("Events")
plt.savefig(os.path.join(data_path, "white_test_ps.pdf"))
if option == "caltech256-identity-wrong-files":
caltech_fn = "Caltech256_10fps_20160411.hdf5"
caltech_path = os.path.join(data_path, caltech_fn)
caltech_db = h5py.File(caltech_path, mode="r")
caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
img_num = 30
f = file(caltech_stats_path, mode="r")
caltech_stats = pickle.load(f)
f.close()
caltech_list = caltech_stats["caltech256_list"]
cn = caltech_list[62]
img_name = caltech_stats[cn][63 - 1]
print img_name
img_n, img_ex = os.path.splitext(img_name)
seq_save_path = os.path.join(data_path, "caltech256_figs_exp",
img_n + ".gif")
if not os.path.isfile(seq_save_path):
num_frames = int(caltech_db[cn][img_n].attrs["num_frames"])
print "Number of frames: ", num_frames
timestamps = caltech_db[cn][img_n]["timestamps"][()]
x_pos = caltech_db[cn][img_n]["x_pos"][()]
y_pos = caltech_db[cn][img_n]["y_pos"][()]
pol = caltech_db[cn][img_n]["pol"][()]
print timestamps
print x_pos
print y_pos.shape
print pol.shape
(timestamps, x_pos,
y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
y_pos, pol, window=1000)
frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
pol, num_frames, fs=3)
print "Length of produced frames: ", len(frames)
new_frames = []
for frame in frames:
tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
new_frames.append(tmp_frame)
clip = ImageSequenceClip(new_frames, fps=20)
clip.write_gif(seq_save_path, fps=30)
print "Sequence %s is saved at %s" % (img_name, seq_save_path)
elif option == "caltech256-ps":
caltech_fn = "INI_Caltech256_10fps_20160424.hdf5"
caltech_path = os.path.join(data_path, caltech_fn)
caltech_db = h5py.File(caltech_path, mode="r")
caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
caltech_save_path = os.path.join(data_path, "caltech256_ps.eps")
img_num = 60
f = file(caltech_stats_path, mode="r")
caltech_stats = pickle.load(f)
f.close()
caltech_list = caltech_stats["caltech256_list"]
cn = caltech_list[0]
img_name = caltech_stats[cn][img_num-1]
img_n, img_ex = os.path.splitext(img_name)
timestamps = caltech_db[cn][img_n]["timestamps"][()]
print "[MESSAGE] DATA IS LOADED."
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(50, 45))
# plt.ylim([0, 3e-3])
plt.xlim([0, 100])
# plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=10)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 20))
plt.yticks(np.arange(0, 2.0e-1, 0.3e-1))
plt.xlabel("Frequency [Hz]", fontsize=150)
# plt.ylabel("Events", fontsize=100)
plt.savefig(caltech_save_path, format="eps", dpi=1200,
bbox_inches='tight', pad_inches=0.5)
# plt.show()
print "[MESSAGE] Power Spectrum is saved at %s" % (caltech_save_path)
elif option == "ucf50-ps":
ucf50_fn = "INI_UCF50_30fps_20160424.hdf5"
ucf50_path = os.path.join(data_path, ucf50_fn)
ucf50_db = h5py.File(ucf50_path, mode="r")
ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
vid_num = 11
ucf50_save_path = os.path.join(data_path, "ucf50_ps.eps")
f = file(ucf50_stats_path, mode="r")
ucf50_stats = pickle.load(f)
f.close()
ucf50_list = ucf50_stats["ucf50_list"]
cn = ucf50_list[0]
vid_name = ucf50_stats[cn][vid_num-1]
vid_n, vid_ex = os.path.splitext(vid_name)
timestamps = ucf50_db[cn][vid_n]["timestamps"][()]
print "[MESSAGE] DATA IS LOADED."
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(50, 45))
# plt.ylim([0, 3e-3])
plt.xlim([0, 100])
# plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=10)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 20))
plt.yticks(np.arange(0, 2.5e-1, 0.4e-1))
plt.xlabel("Frequency [Hz]", fontsize=150)
# plt.ylabel("Events", fontsize=100)
plt.savefig(ucf50_save_path, format="eps", dpi=1200,
bbox_inches='tight', pad_inches=0.5)
# plt.show()
print "[MESSAGE] Power Spectrum is saved at %s" % (ucf50_save_path)
elif option == "tracking-ps":
tracking_fn = "INI_TrackingDataset_30fps_20160424.hdf5"
tracking_path = os.path.join(data_path, tracking_fn)
tracking_db = h5py.File(tracking_path, mode="r")
tracking_stats_path = os.path.join(stats_path, "tracking_stats.pkl")
tracking_save_path = os.path.join(data_path, "tracking_ps.eps")
f = file(tracking_stats_path, mode="r")
tracking_stats = pickle.load(f)
f.close()
pl = tracking_stats["primary_list"]
sl = tracking_stats["secondary_list"]
pc = pl[0]
sc = sl[pc][1]
timestamps = tracking_db[pc][sc]["timestamps"][()]
print "[MESSAGE] DATA IS LOADED."
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(50, 45))
# plt.ylim([0, 3e-3])
plt.xlim([0, 100])
# plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=10)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 20))
plt.yticks(np.arange(0, 7e-2+2e-2, 1.5e-2))
plt.xlabel("Frequency [Hz]", fontsize=150)
# plt.ylabel("Events", fontsize=100)
plt.savefig(tracking_save_path, format="eps", dpi=1200,
bbox_inches='tight', pad_inches=0.5)
print "[MESSAGE] Power Spectrum is saved at %s" % (tracking_save_path)
elif option == "vot-ps":
vot_fn = "INI_VOT_30fps_20160424.hdf5"
vot_path = os.path.join(data_path, vot_fn)
vot_db = h5py.File(vot_path, mode="r")
vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
vot_save_path = os.path.join(data_path, "vot_ps.eps")
# load vot stats
f = file(vot_stats_path, mode="r")
vot_stats = pickle.load(f)
f.close()
vot_list = vot_stats['vot_list']
num_frames = vot_stats['num_frames']
vidseq = vot_list[9]
timestamps = vot_db[vidseq]["timestamps"][()]
print "[MESSAGE] DATA IS LOADED."
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(50, 45))
# plt.ylim([0, 3e-3])
plt.xlim([0, 100])
# plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=10)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 20))
plt.yticks(np.arange(0, 1.8e-1+0.3e-1, 0.3e-1))
plt.xlabel("Frequency [Hz]", fontsize=150)
# plt.ylabel("Events", fontsize=100)
plt.savefig(vot_save_path, format="eps", dpi=1200,
bbox_inches='tight', pad_inches=0.5)
# plt.show()
print "[MESSAGE] Power Spectrum is saved at %s" % (vot_save_path)
elif option == "event-frequency":
vot_fn = "INI_VOT_30fps_20160424.hdf5"
vot_path = os.path.join(data_path, vot_fn)
vot_db = h5py.File(vot_path, mode="r")
vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
# load vot stats
f = file(vot_stats_path, mode="r")
vot_stats = pickle.load(f)
f.close()
vot_list = vot_stats['vot_list']
num_frames = vot_stats['num_frames']
vidseq = vot_list[2]
timestamps = vot_db[vidseq]["timestamps"][()]
event_arr = dvsproc.cal_event_count(timestamps)
event_freq = dvsproc.cal_event_freq(event_arr, window=1000)
plt.figure(figsize=(54, 24))
plt.plot(event_freq[:, 0]/1e6, event_freq[:, 1], linewidth=10)
plt.xlabel("Time (s)", fontsize=100)
plt.ylabel("Event Frequency", fontsize=100)
plt.savefig(os.path.join(data_path, "event_freq.eps"),
format="eps", dpi=1200, bbox_inches='tight',
pad_inches=0.5)
elif option == "mnist-dvs":
mnist_path = os.path.join(data_path, "MNIST_DVS")
for i in xrange(10):
base_path = os.path.join(mnist_path, str(i))
s4_path = os.path.join(base_path, "mnist_"+str(i)+"_scale04.aedat")
s8_path = os.path.join(base_path, "mnist_"+str(i)+"_scale08.aedat")
s16_path = os.path.join(base_path, "mnist_"+str(i)+"_scale16.aedat")
for p in [s4_path, s8_path, s16_path]:
p_n, p_ex = os.path.splitext(p)
(timestamps, xaddr,
yaddr, pol) = dvsproc.loadaerdat(p, camera='DVS128')
frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, xaddr, yaddr,
pol, num_frames=10, fs=5,
platform="linux2",
device="DVS128")
frame = ((frames[1]+fs)/float(2*fs)*256).astype(np.uint8)
cv2.imwrite(p_n+".png", frame)
print "[MESSAGE] Image for recording %s is generated" % p
elif option == "mnist-dvs-ps":
mnist_path = os.path.join(data_path, "MNIST_DVS")
mnist_save_path = os.path.join(mnist_path, "ps_mnist_dvs.pdf")
i = 4
base_path = os.path.join(mnist_path, str(i))
s4_path = os.path.join(base_path, "mnist_"+str(i)+"_scale04.aedat")
s8_path = os.path.join(base_path, "mnist_"+str(i)+"_scale08.aedat")
s16_path = os.path.join(base_path, "mnist_"+str(i)+"_scale16.aedat")
(timestamps, xaddr,
yaddr, pol) = dvsproc.loadaerdat(s4_path, camera='DVS128')
print "[MESSAGE] DATA IS LOADED."
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(timestamps.shape[0]):
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f[:450]
ff_draw = 2*np.abs(ff[:450])
plt.figure(figsize=(18, 8))
plt.ylim([0, 3e-3])
plt.xlim([0, 100])
plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=2)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(np.min(f_draw), np.max(f_draw)+1, 10))
plt.xlabel("Frequency [Hz]")
plt.ylabel("Events")
plt.savefig(mnist_save_path)
# plt.show()
print "[MESSAGE] Power Spectrum is saved at %s" % (mnist_save_path)
elif option == "nmnist":
nmnist_path = os.path.join(data_path, "N_MNIST")
for i in xrange(10):
file_path = os.path.join(nmnist_path, str(i)+".bin")
f_n, f_ex = os.path.splitext(file_path)
print "[MESSAGE] Loading %s" % (file_path)
file_handle = open(file_path, 'rb')
raw_data = np.fromfile(file_handle, dtype=np.uint8)
file_handle.close()
raw_data = np.uint16(raw_data)
all_y = raw_data[1::5]
all_x = raw_data[0::5]
all_p = (raw_data[2::5] & 128) >> 7
all_ts = ((raw_data[2::5] & 127) << 16) | \
(raw_data[3::5] << 8) | (raw_data[4::5])
frames, fs, _ = dvsproc.gen_dvs_frames(all_ts, all_x, all_y, all_p, 3,
fs=3, platform="linux2",
device="ATIS")
frame = frames[1]
frame = ((frame[:28, :28]+fs)/float(2*fs)*256).astype(np.uint8)
cv2.imwrite(f_n+".png", frame)
print "[MESSAGE] Image for recording %s is generated" % (file_path)
elif option == "ncaltech101":
n_caltech_path = os.path.join(data_path, "N_Caltech101")
for i in xrange(16):
file_path = os.path.join(n_caltech_path,
"image_"+"%04d" % (i+1,)+".bin")
f_n, f_ex = os.path.splitext(file_path)
print "[MESSAGE] Loading %s" % (file_path)
file_handle = open(file_path, 'rb')
raw_data = np.fromfile(file_handle, dtype=np.uint8)
file_handle.close()
raw_data = np.uint16(raw_data)
all_y = raw_data[1::5]
all_x = raw_data[0::5]
all_p = (raw_data[2::5] & 128) >> 7
all_ts = ((raw_data[2::5] & 127) << 16) | \
(raw_data[3::5] << 8) | (raw_data[4::5])
max_y = np.max(all_y)
max_x = np.max(all_x)
frames, fs, _ = dvsproc.gen_dvs_frames(all_ts, all_x, all_y, all_p, 3,
fs=3, platform="linux2",
device="ATIS")
frame = frames[2][:max_y, :max_x]
frame = ((frame+fs)/float(2*fs)*256).astype(np.uint8)
cv2.imwrite(f_n+".png", frame)
print "[MESSAGE] Image for recording %s is generated" % (file_path)
elif option == "ncaltech101-ps":
n_caltech_path = os.path.join(data_path, "N_Caltech101")
n_caltech_save_path = os.path.join(n_caltech_path, "ps_ncaltech101.pdf")
timestamps = np.array([])
for i in xrange(100):
file_path = os.path.join(n_caltech_path,
"image_" + "%04d" % (i + 1,) + ".bin")
print "[MESSAGE] Loading %s" % (file_path)
file_handle = open(file_path, 'rb')
raw_data = np.fromfile(file_handle, dtype=np.uint8)
file_handle.close()
raw_data = np.uint16(raw_data)
all_ts = ((raw_data[2::5] & 127) << 16) | \
(raw_data[3::5] << 8) | (raw_data[4::5])
all_ts = all_ts.astype(np.float64)
if not timestamps.size:
timestamps = all_ts
else:
# all_ts -= all_ts[0]
all_ts += timestamps[-1]
timestamps = np.hstack((timestamps, all_ts))
num_data = timestamps.shape[0]
tend = timestamps[-1]
vv = np.zeros(tend+1)
for i in xrange(num_data):
if timestamps[i] < tend:
vv[timestamps[i]] += 1
fs = 1e6
L = vv.shape[0]
vv = vv - np.sum(vv)/L
NFFT = int(2**np.ceil(np.log2(L)))
ff = np.fft.fft(vv, NFFT)/L
f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
f_draw = f
ff_draw = 2*np.abs(ff[:NFFT/2])
plt.figure(figsize=(18, 8))
# plt.ylim([0, 2e-5])
plt.xlim([0, 100])
plt.grid(True)
plt.plot(f_draw, ff_draw, 'b', linewidth=2)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.xticks(np.arange(0, 100+1, 10))
plt.xlabel("Frequency [Hz]")
plt.ylabel("Events")
plt.savefig(n_caltech_save_path)
# plt.show()
print "[MESSAGE] Power Spectrum is saved at %s" % (n_caltech_save_path)
elif option == "vot":
    # Load VOT Challenge Dataset
    # Render every VOT sequence from the HDF5 event database as an
    # animated GIF with its ground-truth polygon drawn on each frame.
    vot_fn = "VOT_30fps_20160409.hdf5"
    vot_path = os.path.join(data_path, vot_fn)
    vot_db = h5py.File(vot_path, mode="r")
    vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
    # load vot stats
    # NOTE(review): Python 2 file() constructor; the pickle is expected
    # to hold at least the keys 'vot_list' and 'num_frames'.
    f = file(vot_stats_path, mode="r")
    vot_stats = pickle.load(f)
    f.close()
    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']
    for vidseq in vot_list:
        seq_save_path = os.path.join(data_path, "vot_gifs", vidseq+".gif")
        # Skip sequences whose GIF has already been rendered.
        if not os.path.isfile(seq_save_path):
            num_frames = int(vot_db[vidseq].attrs["num_frames"])
            timestamps = vot_db[vidseq]["timestamps"][()]
            x_pos = vot_db[vidseq]["x_pos"][()]
            y_pos = vot_db[vidseq]["y_pos"][()]
            pol = vot_db[vidseq]["pol"][()]
            bounding_box = vot_db[vidseq]["bounding_box"][()]
            # Drop the leading column (presumably a frame index/time —
            # TODO confirm) and reshape the remaining 8 values into
            # 4 (x, y) polygon corners per frame.
            gt = bounding_box[:, 1:]
            gt = np.reshape(gt, (gt.shape[0], 4, 2))
            (timestamps, x_pos,
             y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                                   y_pos, pol, window=1000)
            frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                                   pol, num_frames, fs=3)
            new_frames = []
            # Map accumulated event counts from [-fs, fs] to uint8 [0, 255].
            for frame in frames:
                tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
                new_frames.append(tmp_frame)
            new_frames = gui.draw_poly_box_sequence(new_frames, gt,
                                                    color=[0, 255, 0])
            clip = ImageSequenceClip(new_frames, fps=20)
            clip.write_gif(seq_save_path, fps=30)
            print "Sequence %s is saved at %s" % (vidseq, seq_save_path)
elif option == "tracking":
    # Same GIF rendering as the "vot" branch, but for the Tracking
    # Dataset, which is grouped as primary category -> secondary clips.
    tracking_fn = "TrackingDataset_30fps_20160401.hdf5"
    tracking_path = os.path.join(data_path, tracking_fn)
    tracking_db = h5py.File(tracking_path, mode="r")
    tracking_stats_path = os.path.join(stats_path, "tracking_stats.pkl")
    f = file(tracking_stats_path, mode="r")
    tracking_stats = pickle.load(f)
    f.close()
    pl = tracking_stats["primary_list"]
    sl = tracking_stats["secondary_list"]
    for pc in pl:
        # remove sequence Kalal until I got more memory
        if pc != "Kalal":
            for sc in sl[pc]:
                seq_save_path = os.path.join(data_path, "tracking_gifs",
                                             sc+".gif")
                # Skip clips that already have a rendered GIF.
                if not os.path.isfile(seq_save_path):
                    num_frames = int(tracking_db[pc][sc].attrs["num_frames"])
                    timestamps = tracking_db[pc][sc]["timestamps"][()]
                    x_pos = tracking_db[pc][sc]["x_pos"][()]
                    y_pos = tracking_db[pc][sc]["y_pos"][()]
                    pol = tracking_db[pc][sc]["pol"][()]
                    bounding_box = tracking_db[pc][sc]["bounding_box"][()]
                    # Leading column dropped (presumably frame index —
                    # TODO confirm); rest becomes 4 (x, y) corners/frame.
                    gt = bounding_box[:, 1:]
                    gt = np.reshape(gt, (gt.shape[0], 4, 2))
                    (timestamps, x_pos,
                     y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                                           y_pos, pol,
                                                           window=1000)
                    frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos,
                                                           y_pos, pol,
                                                           num_frames, fs=3)
                    new_frames = []
                    # Rescale event counts [-fs, fs] -> uint8 [0, 255].
                    for frame in frames:
                        tmp_frame = (((frame+fs) /
                                      float(2*fs))*255).astype(np.uint8)
                        new_frames.append(tmp_frame)
                    new_frames = gui.draw_poly_box_sequence(new_frames, gt,
                                                            color=[0, 255, 0])
                    clip = ImageSequenceClip(new_frames, fps=20)
                    clip.write_gif(seq_save_path, fps=30)
                    print "Sequence %s is saved at %s" % (sc, seq_save_path)
elif option == "ucf50":
    # Render one recording (the vid_num-th) of every UCF-50 class as a
    # GIF; no ground-truth overlay for this dataset.
    ucf50_fn = "UCF50_30fps_20160409.hdf5"
    ucf50_path = os.path.join(data_path, ucf50_fn)
    ucf50_db = h5py.File(ucf50_path, mode="r")
    ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
    vid_num = 10  # 1-based index of the video chosen per class
    f = file(ucf50_stats_path, mode="r")
    ucf50_stats = pickle.load(f)
    f.close()
    ucf50_list = ucf50_stats["ucf50_list"]
    for cn in ucf50_list:
        vid_name = ucf50_stats[cn][vid_num-1]
        vid_n, vid_ex = os.path.splitext(vid_name)
        seq_save_path = os.path.join(data_path, "ucf50_gifs",
                                     vid_n+".gif")
        # Skip videos that already have a rendered GIF.
        if not os.path.isfile(seq_save_path):
            num_frames = int(ucf50_db[cn][vid_n].attrs["num_frames"])
            timestamps = ucf50_db[cn][vid_n]["timestamps"][()]
            x_pos = ucf50_db[cn][vid_n]["x_pos"][()]
            y_pos = ucf50_db[cn][vid_n]["y_pos"][()]
            pol = ucf50_db[cn][vid_n]["pol"][()]
            (timestamps, x_pos,
             y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                                   y_pos, pol, window=1000)
            frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                                   pol, num_frames, fs=3)
            new_frames = []
            # Rescale event counts [-fs, fs] -> uint8 [0, 255].
            for frame in frames:
                tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
                new_frames.append(tmp_frame)
            clip = ImageSequenceClip(new_frames, fps=20)
            clip.write_gif(seq_save_path, fps=30)
            print "Sequence %s is saved at %s" % (vid_name, seq_save_path)
elif option == "caltech256":
    # Render one recording (the img_num-th) of every Caltech-256 class
    # as a GIF; mirrors the "ucf50" branch.
    caltech_fn = "Caltech256_10fps_20160411.hdf5"
    caltech_path = os.path.join(data_path, caltech_fn)
    caltech_db = h5py.File(caltech_path, mode="r")
    caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
    img_num = 30  # 1-based index of the image chosen per class
    f = file(caltech_stats_path, mode="r")
    caltech_stats = pickle.load(f)
    f.close()
    caltech_list = caltech_stats["caltech256_list"]
    for cn in caltech_list:
        img_name = caltech_stats[cn][img_num-1]
        img_n, img_ex = os.path.splitext(img_name)
        seq_save_path = os.path.join(data_path, "caltech256_figs_exp",
                                     img_n+".gif")
        # Skip images that already have a rendered GIF.
        if not os.path.isfile(seq_save_path):
            num_frames = int(caltech_db[cn][img_n].attrs["num_frames"])
            print "Number of frames: ", num_frames
            timestamps = caltech_db[cn][img_n]["timestamps"][()]
            x_pos = caltech_db[cn][img_n]["x_pos"][()]
            y_pos = caltech_db[cn][img_n]["y_pos"][()]
            pol = caltech_db[cn][img_n]["pol"][()]
            (timestamps, x_pos,
             y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                                   y_pos, pol, window=1000)
            frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                                   pol, num_frames, fs=3)
            print "Length of produced frames: ", len(frames)
            new_frames = []
            # Rescale event counts [-fs, fs] -> uint8 [0, 255].
            for frame in frames:
                tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
                new_frames.append(tmp_frame)
            clip = ImageSequenceClip(new_frames, fps=20)
            clip.write_gif(seq_save_path, fps=30)
            print "Sequence %s is saved at %s" % (img_name, seq_save_path)
if option == "gui-show":
    # Put text on screen
    # Display one calibration image centred (with a green border) inside
    # a grey 4:3 window until the user presses ESC.
    image_path = os.path.join(data_path, "vot2015", "motocross1",
                              "00000106.jpg")
    frame = cv2.imread(image_path)
    win_w = 720
    win_h = 540
    scale = 0.9  # inner image occupies 90% of the window in each axis
    window_title = "DVS-VOT-EXP"
    bg_color = [127, 127, 127]
    message = "Experiment Setup Calibration"  # NOTE(review): never used below
    # Check if input window is 4:3
    if float(win_h)/float(win_w) != 0.75:
        raise ValueError("the input window is not in ratio 4:3")
    # get stats of smaller window
    swin_h = int(scale*win_h)
    swin_w = int(scale*win_w)
    frame = gui.rescale_image(frame, swin_h, swin_w, color=bg_color)
    window = np.ones((win_h, win_w, 3))*bg_color
    # Margins that centre the scaled image inside the full window.
    diff_y = (win_h-swin_h)/2
    diff_x = (win_w-swin_w)/2
    window[diff_y:swin_h+diff_y, diff_x:swin_w+diff_x, :] = frame
    window = np.array(window, dtype=np.uint8)
    flag = True
    while (1):
        # draw such window
        # NOTE(review): flag is never set to False, so the elif branch
        # below is dead code and the rectangle is redrawn every pass.
        if flag is True:
            temp_win = window.copy()
            cv2.rectangle(temp_win, (diff_x, diff_y),
                          (diff_x+swin_w, diff_y+swin_h), color=[0, 255, 0],
                          thickness=2)
            flag = True
        elif flag is False:
            temp_win = window.copy()
            flag = True
        cv2.imshow(window_title, temp_win)
        k = cv2.waitKey(delay=10) & 0xFF
        if k == 27:  # ESC exits the calibration loop
            break
    print "[MESSAGE] Experiment setup calibration is finished."
if option == "y-time-figure":
    # Plot a 1000-event window of one UCF-50 "Drumming" recording:
    # y-vs-time (2-D) and x/y-vs-time (3-D) scatter figures.
    ucf50_fn = "INI_UCF50_30fps_20160424.hdf5"
    # NOTE(review): bare join() — presumably `from os.path import join`
    # appears earlier in the file; everywhere else os.path.join is used.
    ucf50_path = join(data_path, ucf50_fn)
    ucf50_db = h5py.File(ucf50_path, mode="r")
    ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
    vid_num = 50  # 1-based index of the recording within the class
    f = file(ucf50_stats_path, mode="r")
    ucf50_stats = pickle.load(f)
    f.close()
    ucf50_list = ucf50_stats["ucf50_list"]
    cn = "Drumming"
    vid_name = ucf50_stats[cn][vid_num-1]
    vid_n, vid_ex = os.path.splitext(vid_name)
    # NOTE(review): seq_save_path and num_frames are computed but unused.
    seq_save_path = os.path.join(data_path, "all_imgs", "ucf50_dvs_figs")
    num_frames = int(ucf50_db[cn][vid_n].attrs["num_frames"])
    timestamps = ucf50_db[cn][vid_n]["timestamps"][()]
    x_pos = ucf50_db[cn][vid_n]["x_pos"][()]
    y_pos = ucf50_db[cn][vid_n]["y_pos"][()]
    pol = ucf50_db[cn][vid_n]["pol"][()]
    # Slice out a 1000-event window for plotting.
    time = timestamps[3000:4000]
    x_idx = x_pos[3000:4000]
    y_idx = y_pos[3000:4000]
    plt.figure(figsize=(30, 6))
    # /1e3: timestamps presumably in microseconds, axis labelled in ms
    # — TODO confirm against the recording pipeline.
    plt.plot(time/1e3, y_idx, ".", linewidth=2)
    plt.ylim([0, 180])
    plt.xlabel("Time (ms)")
    plt.ylabel("y")
    plt.savefig(os.path.join(data_path, "y-time-figure.png"))
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
    fig = plt.figure(figsize=(30, 15))
    ax = fig.gca(projection='3d')
    ax.plot(time/1e3, x_idx, y_idx, ".", linewidth=2)
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('X')
    ax.set_zlabel('Y')
    fig.savefig(os.path.join(data_path, "x-y-time-figure.png"))
| 35.98835
| 78
| 0.590671
| 5,457
| 37,068
| 3.785596
| 0.071285
| 0.025559
| 0.034853
| 0.045406
| 0.823603
| 0.803611
| 0.787734
| 0.766822
| 0.752541
| 0.739907
| 0
| 0.048127
| 0.25839
| 37,068
| 1,029
| 79
| 36.023324
| 0.703347
| 0.027976
| 0
| 0.708075
| 0
| 0
| 0.111535
| 0.01265
| 0
| 0
| 0.000111
| 0
| 0
| 0
| null | null | 0
| 0.013665
| null | null | 0.053416
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
174a6026e044b743b43a3f936d0634b389d53b10
| 18,213
|
py
|
Python
|
purchases/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 11
|
2021-01-23T01:09:54.000Z
|
2021-01-25T07:16:30.000Z
|
purchases/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 7
|
2021-04-06T18:19:10.000Z
|
2021-09-22T19:45:03.000Z
|
purchases/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 3
|
2021-01-23T18:55:32.000Z
|
2021-02-16T17:47:59.000Z
|
# Generated by Django 3.1.3 on 2021-01-01 15:00
import accountancy.fields
import accountancy.mixins
import accountancy.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import purchases.models
import simple_history.models
class Migration(migrations.Migration):
    """Initial schema for the ``purchases`` app.

    Auto-generated by ``makemigrations`` (Django 3.1.3).  Creates the
    purchase ledger models (header, line, matching, supplier proxy) and
    their ``simple_history`` shadow tables.  Do not hand-edit an applied
    migration; add a follow-up migration instead.
    """

    initial = True

    dependencies = [
        ('nominals', '0001_initial'),
        ('contacts', '0001_initial'),
        ('controls', '0001_initial'),
        ('cashbook', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('vat', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='PurchaseHeader',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ref', models.CharField(max_length=20)),
                ('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('discount', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('total', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('paid', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('due', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('date', models.DateField()),
                ('due_date', models.DateField(blank=True, null=True)),
                ('status', models.CharField(choices=[('c', 'cleared'), ('v', 'void')], default='c', max_length=2)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('cash_book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cashbook.cashbook')),
            ],
            options={
                # One custom permission per (action, transaction type) pair.
                'permissions': [('view_transactions_enquiry', 'Can view transactions'), ('view_age_creditors_report', 'Can view aged creditors report'), ('create_brought_forward_invoice_transaction', 'Can create brought forward invoice'), ('create_brought_forward_credit_note_transaction', 'Can create brought forward credit note'), ('create_brought_forward_payment_transaction', 'Can create brought forward payment'), ('create_brought_forward_refund_transaction', 'Can create brought forward refund'), ('create_invoice_transaction', 'Can create invoice'), ('create_credit_note_transaction', 'Can create credit note'), ('create_payment_transaction', 'Can create payment'), ('create_refund_transaction', 'Can create refund'), ('edit_brought_forward_invoice_transaction', 'Can edit brought forward invoice'), ('edit_brought_forward_credit_note_transaction', 'Can edit brought forward credit note'), ('edit_brought_forward_payment_transaction', 'Can edit brought forward payment'), ('edit_brought_forward_refund_transaction', 'Can edit brought forward refund'), ('edit_invoice_transaction', 'Can edit invoice'), ('edit_credit_note_transaction', 'Can edit credit note'), ('edit_payment_transaction', 'Can edit payment'), ('edit_refund_transaction', 'Can edit refund'), ('view_brought_forward_invoice_transaction', 'Can view brought forward invoice'), ('view_brought_forward_credit_note_transaction', 'Can view brought forward credit note'), ('view_brought_forward_payment_transaction', 'Can view brought forward payment'), ('view_brought_forward_refund_transaction', 'Can view brought forward refund'), ('view_invoice_transaction', 'Can view invoice'), ('view_credit_note_transaction', 'Can view credit note'), ('view_payment_transaction', 'Can view payment'), ('view_refund_transaction', 'Can view refund'), ('void_brought_forward_invoice_transaction', 'Can void brought forward invoice'), ('void_brought_forward_credit_note_transaction', 'Can void brought forward credit note'), 
                ('void_brought_forward_payment_transaction', 'Can void brought forward payment'), ('void_brought_forward_refund_transaction', 'Can void brought forward refund'), ('void_invoice_transaction', 'Can void invoice'), ('void_credit_note_transaction', 'Can void credit note'), ('void_payment_transaction', 'Can void payment'), ('void_refund_transaction', 'Can void refund')],
            },
            bases=(purchases.models.ModuleTransactions, accountancy.mixins.AuditMixin, accountancy.models.TransactionBase, models.Model),
        ),
        # Proxy over contacts.Contact — no table of its own.
        migrations.CreateModel(
            name='Supplier',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('contacts.contact',),
        ),
        migrations.CreateModel(
            name='PurchaseMatching',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True)),
                ('value', accountancy.fields.AccountsDecimalField(blank=True, decimal_places=2, default=0, max_digits=10)),
                ('matched_by_type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('matched_to_type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('matched_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matched_by_these', to='purchases.purchaseheader')),
                ('matched_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matched_to_these', to='purchases.purchaseheader')),
                ('period', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='controls.period')),
            ],
            options={
                'abstract': False,
            },
            bases=(accountancy.mixins.AuditMixin, models.Model),
        ),
        migrations.CreateModel(
            name='PurchaseLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('line_no', models.IntegerField()),
                ('description', models.CharField(max_length=100)),
                ('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('goods_nominal_transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='purchase_good_line', to='nominals.nominaltransaction')),
                ('header', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='purchases.purchaseheader')),
                ('nominal', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='nominals.nominal')),
                ('total_nominal_transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='purchase_total_line', to='nominals.nominaltransaction')),
                ('vat_code', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='vat.vat', verbose_name='Vat Code')),
                ('vat_nominal_transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='purchase_vat_line', to='nominals.nominaltransaction')),
                ('vat_transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='purchase_line_vat_transaction', to='vat.vattransaction')),
            ],
            options={
                'ordering': ['line_no'],
            },
            bases=(purchases.models.ModuleTransactions, accountancy.mixins.AuditMixin, accountancy.models.TransactionBase, models.Model),
        ),
        # Fields added after creation to break the circular dependency on
        # PurchaseMatching / controls.period / Supplier.
        migrations.AddField(
            model_name='purchaseheader',
            name='matched_to',
            field=models.ManyToManyField(through='purchases.PurchaseMatching', to='purchases.PurchaseHeader'),
        ),
        migrations.AddField(
            model_name='purchaseheader',
            name='period',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='controls.period'),
        ),
        migrations.AddField(
            model_name='purchaseheader',
            name='supplier',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='purchases.supplier'),
        ),
        # simple_history shadow tables: DO_NOTHING FKs without DB
        # constraints, plus history bookkeeping columns.
        migrations.CreateModel(
            name='HistoricalPurchaseMatching',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('created', models.DateField(blank=True, editable=False)),
                ('value', accountancy.fields.AccountsDecimalField(blank=True, decimal_places=2, default=0, max_digits=10)),
                ('matched_by_type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('matched_to_type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('matched_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='purchases.purchaseheader')),
                ('matched_to', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='purchases.purchaseheader')),
                ('period', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='controls.period')),
            ],
            options={
                'verbose_name': 'historical purchase matching',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        migrations.CreateModel(
            name='HistoricalPurchaseLine',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('line_no', models.IntegerField()),
                ('description', models.CharField(max_length=100)),
                ('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('goods_nominal_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominaltransaction')),
                ('header', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='purchases.purchaseheader')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('nominal', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominal')),
                ('total_nominal_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominaltransaction')),
                ('vat_code', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='vat.vat', verbose_name='Vat Code')),
                ('vat_nominal_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominaltransaction')),
                ('vat_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='vat.vattransaction')),
            ],
            options={
                'verbose_name': 'historical purchase line',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        migrations.CreateModel(
            name='HistoricalPurchaseHeader',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('ref', models.CharField(max_length=20)),
                ('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('discount', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('total', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('paid', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('due', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('date', models.DateField()),
                ('due_date', models.DateField(blank=True, null=True)),
                ('status', models.CharField(choices=[('c', 'cleared'), ('v', 'void')], default='c', max_length=2)),
                ('created', models.DateTimeField(blank=True, editable=False)),
                ('type', models.CharField(choices=[('pbi', 'Brought Forward Invoice'), ('pbc', 'Brought Forward Credit Note'), ('pbp', 'Brought Forward Payment'), ('pbr', 'Brought Forward Refund'), ('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note')], max_length=3)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('cash_book', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='cashbook.cashbook')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('period', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='controls.period')),
                ('supplier', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='purchases.supplier')),
            ],
            options={
                'verbose_name': 'historical purchase header',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
    ]
| 90.61194
| 2,345
| 0.651842
| 1,991
| 18,213
| 5.767956
| 0.086891
| 0.078022
| 0.036573
| 0.057471
| 0.837252
| 0.744514
| 0.708986
| 0.704371
| 0.704371
| 0.698276
| 0
| 0.008052
| 0.188547
| 18,213
| 200
| 2,346
| 91.065
| 0.768997
| 0.002471
| 0
| 0.601036
| 1
| 0
| 0.296433
| 0.097985
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041451
| 0
| 0.062176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1763531b636a995563a45bfe2dadd8f98afbca84
| 6,526
|
py
|
Python
|
pinax/stripe/migrations/0014_blank_with_null.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | null | null | null |
pinax/stripe/migrations/0014_blank_with_null.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | 114
|
2017-10-18T09:14:02.000Z
|
2019-01-24T19:03:01.000Z
|
pinax/stripe/migrations/0014_blank_with_null.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | 1
|
2017-10-20T08:13:09.000Z
|
2017-10-20T08:13:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-23 15:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
    """Relax nullability on pinax-stripe fields.

    Auto-generated by ``makemigrations`` (Django 1.11.6): adds
    ``blank=True`` (and where applicable ``null=True``) to optional
    Stripe-mirrored fields.  Do not hand-edit an applied migration;
    add a follow-up migration instead.
    """

    dependencies = [
        ('pinax_stripe', '0013_charge_outcome'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='tos_acceptance_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='charge',
            name='amount',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='charge',
            name='amount_refunded',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='charge',
            name='customer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Customer'),
        ),
        migrations.AlterField(
            model_name='charge',
            name='invoice',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='pinax_stripe.Invoice'),
        ),
        migrations.AlterField(
            model_name='charge',
            name='source',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='amount_off',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='duration_in_months',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='max_redemptions',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='percent_off',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='redeem_by',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='coupon',
            name='times_redeemed',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='customer',
            name='date_purged',
            field=models.DateTimeField(blank=True, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='discount',
            name='customer',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'),
        ),
        migrations.AlterField(
            model_name='discount',
            name='end',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='discount',
            name='start',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='discount',
            name='subscription',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'),
        ),
        migrations.AlterField(
            model_name='event',
            name='customer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Customer'),
        ),
        migrations.AlterField(
            model_name='eventprocessingexception',
            name='event',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Event'),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='attempt_count',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='charge',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='pinax_stripe.Charge'),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='subscription',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='tax',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='tax_percent',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='webhooks_delivered_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='invoiceitem',
            name='plan',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan'),
        ),
        migrations.AlterField(
            model_name='invoiceitem',
            name='quantity',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='invoiceitem',
            name='subscription',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription'),
        ),
        migrations.AlterField(
            model_name='plan',
            name='trial_period_days',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='subscription',
            name='application_fee_percent',
            field=models.DecimalField(blank=True, decimal_places=2, default=None, max_digits=3, null=True),
        ),
    ]
| 38.845238
| 156
| 0.596537
| 644
| 6,526
| 5.900621
| 0.167702
| 0.157895
| 0.197368
| 0.228947
| 0.808947
| 0.800263
| 0.742368
| 0.737895
| 0.707368
| 0.676053
| 0
| 0.007689
| 0.282562
| 6,526
| 167
| 157
| 39.077844
| 0.80393
| 0.01042
| 0
| 0.675
| 1
| 0
| 0.126723
| 0.031913
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.04375
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
bd58916cc69b575e1c82db88c216ca06999b7d20
| 24,004
|
py
|
Python
|
code/generateTmatrix.py
|
AdityaMate/collapsing_bandits
|
2aecccc6fd986f869088438ea5eba7bbfd5c1e91
|
[
"MIT"
] | 6
|
2020-11-27T10:33:54.000Z
|
2022-02-28T11:13:34.000Z
|
code/generateTmatrix.py
|
AdityaMate/collapsing_bandits
|
2aecccc6fd986f869088438ea5eba7bbfd5c1e91
|
[
"MIT"
] | null | null | null |
code/generateTmatrix.py
|
AdityaMate/collapsing_bandits
|
2aecccc6fd986f869088438ea5eba7bbfd5c1e91
|
[
"MIT"
] | 1
|
2021-09-15T05:21:47.000Z
|
2021-09-15T05:21:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:24:48 2020
@author: adityamate, killian-34
"""
import numpy as np
import pandas as pd
import time
import pomdp
from itertools import combinations
from whittle import *
from utils import *
import os
import argparse
import tqdm
def computeAverageTmatrixFromData(N, file_root='.', epsilon=0.005):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Builds ONE transition matrix from the population average of the real
    patient data, perturbs it with a random action benefit and passivity
    penalty, and then copies that same (T_pass, T_act) pair to all N arms.

    Raises:
        ValueError: if the perturbed matrix fails verify_T_matrix.
    """
    fname = os.path.join(file_root, 'data/patient_T_matrices.npy')
    real = np.load(fname)
    T=np.zeros((N,2,2,2))
    #Passive action transition probabilities
    penalty_pass_00=0
    penalty_pass_11=0
    #Active action transition probabilities
    benefit_act_00=0
    benefit_act_11=0
    # Average stay probabilities over all real patients:
    # avg[0] ~ P(0->0), avg[1] ~ P(1->1)  (per the construction of T_base below)
    avg = real.mean(axis=0)
    # for i in range(N):
    T_base = np.zeros((2,2))
    T_base[0,0] = avg[0]
    T_base[1,1] = avg[1]
    T_base[0,1] = 1 - T_base[0,0]
    T_base[1,0] = 1 - T_base[1,1]
    # Repair degenerate rows so p11 >= p01 (see smooth_real_probs).
    T_base = smooth_real_probs(T_base, epsilon)
    shift = 0.05
    # Patient responds well to call
    benefit_act_00=np.random.uniform(low=0., high=shift) # will subtract from prob of staying 0,0
    benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift) # will add to prob of staying 1,1
    # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
    # Patient does well on their own, low penalty for not calling
    penalty_pass_11=np.random.uniform(low=0., high=shift) # will sub from prob of staying 1,1
    penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift) # will add to prob of staying 0,0
    T_pass = np.copy(T_base)
    T_act = np.copy(T_base)
    # Acting helps: less likely to stay in 0, more likely to stay in 1.
    T_act[0,0] = max(0, T_act[0,0] - benefit_act_00)
    T_act[1,1] = min(1, T_act[1,1] + benefit_act_11)
    # Not acting hurts: more likely to stay in 0, less likely to stay in 1.
    T_pass[0,0] = min(1, T_pass[0,0] + penalty_pass_00)
    T_pass[1,1] = max(0, T_pass[1,1] - penalty_pass_11)
    # Re-normalize rows after clamping.
    T_pass[0,1] = 1 - T_pass[0,0]
    T_pass[1,0] = 1 - T_pass[1,1]
    T_act[0,1] = 1 - T_act[0,0]
    T_act[1,0] = 1 - T_act[1,1]
    # Keep every probability in [epsilon, 1-epsilon].
    T_pass = epsilon_clip(T_pass, epsilon)
    T_act = epsilon_clip(T_act, epsilon)
    #print(T_pass)
    #print(T_act)
    #print()
    if not verify_T_matrix(np.array([T_pass, T_act])):
        print("T matrix invalid\n",np.array([T_pass, T_act]))
        raise ValueError()
    # Every arm gets the identical averaged matrices.
    for i in range(N):
        T[i,0]=T_pass
        T[i,1]=T_act
    return T
# See page 7 of:
# https://projects.iq.harvard.edu/files/teamcore/files/2016_15_teamcore_aamas2016_eve_yundi.pdf
def specialTmatrix(N, kfrac=10, distribution=[0.5, 0.5], delta=0.02, option=2, badf=50):
    """
    Build an Nx2x2x2 transition tensor T[patient][action][state][next_state]
    from one of several hand-crafted scenarios selected by `option`.

    NOTE(review): `option` is immediately overwritten to 3 below, so the
    `option` argument and the code for options 0-2 are currently dead code —
    confirm whether this hard-coding is intentional debugging leftover.
    NOTE(review): `distribution=[0.5, 0.5]` is a mutable default argument;
    safe only as long as callers never mutate it.
    """
    option =3
    if option==0:
        # Two archetypes; each patient's *->1 probabilities are drawn
        # uniformly within +/- delta of the archetype picked via `distribution`.
        # NOTE(review): only the *->1 entries are filled here; the
        # complementary *->0 entries stay 0 — verify downstream normalizes.
        T=np.zeros((N,2,2,2))
        patient_descriptions=[]
        T_p_01=[0.3, 0.3]
        T_p_11=[0.97, 0.1]
        T_a_01=[0.3, 0.9]
        T_a_11=[0.97, 0.97]
        for i in range(N):
            index=np.random.choice(range(len(distribution)), p=distribution)
            T[i][0][0][1]=np.random.uniform(T_p_01[index]-delta, T_p_01[index]+delta)
            T[i][0][1][1]=np.random.uniform(T_p_11[index]-delta, T_p_11[index]+delta)
            T[i][1][0][1]=np.random.uniform(T_a_01[index]-delta, T_a_01[index]+delta)
            T[i][1][1][1]=np.random.uniform(T_a_11[index]-delta, T_a_11[index]+delta)
        return T
    elif option==1:
        # First k = kfrac% of arms get type2; the rest get a type1 whose
        # passive 1->1 stay probability grows slightly with the arm index.
        T=np.zeros((N,2,2,2))
        k=int(kfrac*N/100.)
        # Myopic wants to pull type 2
        '''
        type1 = np.array( [[[0.9, 0.1],
                            [0.6, 0.41]],
                           [[0.6, 0.4],
                            [0.3, 0.7]]])
        type2 = np.array( [[[0.9, 0.1],
                            [0.6, 0.4]],
                           [[0.6, 0.4],
                            [0.3, 0.7]]])
        '''
        type1 = np.array( [[[0.6, 0.4],
                            [0.29, 0.71]],
                           [[0.35, 0.65],
                            [0.05, 0.95]]])
        type2 = np.array( [[[0.6, 0.4],
                            [0.3, 0.7]],
                           [[0.35, 0.65],
                            [0.05, 0.95]]])
        for i in range(k):
            T[i] = type2
        for j in range(k, N):
            # Rebuilt per arm: passive 1->1 entry is 0.71 + j*0.001.
            type1 = np.array( [[[0.6, 0.4],
                                [0.29, 0.71+ j*0.001]],
                               [[0.35, 0.65],
                                [0.05, 0.95]]])
            T[j]=type1
        print ("Returning T matrix: ")
        print ("N: ", N, "k: ", k)
        print ("shape: ", T.shape)
        return T
    elif option==2:
        # Fixed two-arm example.
        # NOTE(review): only T[0] and T[1] are filled; for N > 2 the
        # remaining arms stay all-zero.
        T=np.zeros((N,2,2,2))
        type1= [[[0.97, 0.03],
                 [0.03, 0.97]],
                [[0.96, 0.04],
                 [0.01, 0.99]]]
        type2 = [[[0.25, 0.75],
                  [0.03, 0.97]],
                 [[0.23, 0.77],
                  [0.01 , 0.99 ]]]
        T[0]=type1
        T[1]=type2
        return T
    elif option==3:
        # badf% of arms get type1 ("bad"), the rest type2 ("good"); each is
        # then randomly perturbed with an action benefit / passivity penalty
        # and clipped into [epsilon, 1-epsilon].
        shift1= 0.05
        shift2= 0.05
        shift3= 0.05
        shift4= 0.05
        epsilon=0.01
        T=np.zeros((N,2,2,2))
        type1= [[[0.97, 0.03],
                 [0.03, 0.97]],
                [[0.96, 0.04],
                 [0.01, 0.99]]] ###### Bad patient
        type2 = [[[0.25, 0.75],
                  [0.03, 0.97]],
                 [[0.23, 0.77],
                  [0.01 , 0.99 ]]] ##### Good patient (self-healing)
        for i in range(N):
            types=[type1, type2]
            type_choice=types[np.random.choice([0, 1],p=[badf/100., 1-(badf/100.)])]
            T[i]=np.array(type_choice)
            # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
            benefit_act_00=np.random.uniform(low=0., high=shift1) # will subtract from prob of staying 0,0
            benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift2) # will add to prob of staying 1,1
            # Patient does well on their own, low penalty for not calling
            penalty_pass_11=np.random.uniform(low=0., high=shift3) # will sub from prob of staying 1,1
            penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift4) # will add to prob of staying 0,0
            T[i][1][0][0]= max(0, T[i][1][0][0] - benefit_act_00)
            T[i][1][1][1]= min(1, T[i][1][1][1] + benefit_act_11)
            T[i][0][0][0]= min(1, T[i][0][0][0] + penalty_pass_00)
            T[i][0][1][1]= max(0, T[i][0][1][1] - penalty_pass_11)
            # Re-normalize each row after the clamped perturbations.
            T[i][0][0][1]= 1- T[i][0][0][0]
            T[i][0][1][0]= 1- T[i][0][1][1]
            T[i][1][0][1]= 1- T[i][1][0][0]
            T[i][1][1][0]= 1- T[i][1][1][1]
            T[i][0]=epsilon_clip(T[i][0], epsilon)
            T[i][1]=epsilon_clip(T[i][1], epsilon)
        return T
def generateYundiMyopicFailTmatrix():
    """
    Return the fixed 2x2x2x2 transition tensor for the two-arm example
    where the myopic policy fails (see the AAMAS'16 reference above),
    indexed as T[arm][action][current_state][next_state].
    """
    arm0 = np.array([[[0.99, 0.01],
                      [0.1, 0.9]],
                     [[0.95, 0.05],
                      [0.05, 0.95]]])
    arm1 = np.array([[[0.7, 0.3],
                      [0.4, 0.6]],
                     [[0.4, 0.6],
                      [0.1, 0.9]]])
    return np.stack([arm0, arm1]).astype(float)
def generateRandomTmatrix(N, random_stream):
    """
    Return an Nx2x2x2 transition tensor T[arm][action][state][next_state].

    For each arm, four stay-in-1 probabilities are drawn from
    random_stream.uniform and sorted ascending before being assigned as
    (p_pass_01, p_pass_11, p_act_01, p_act_11) — so the result is random
    but not uniformly random.
    """
    T = np.zeros((N, 2, 2, 2))
    for arm in range(N):
        q01, q11, r01, r11 = sorted(random_stream.uniform(size=4))
        for action, (p01, p11) in enumerate(((q01, q11), (r01, r11))):
            T[arm, action] = [[1.0 - p01, p01],
                              [1.0 - p11, p11]]
    return T
def generateTmatrix(N, responsive_patient_fraction=0.4,
                    range_pass_00=(0.8,1.0), range_pass_11=(0.6,0.9),
                    range_act_g_00=(0,0.2),range_act_g_11=(0.9,1.0),
                    range_act_b_00=(0.6,0.8), range_act_b_11=(0.9,1.0)):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Passive stay probabilities are uniform within range_pass_*; each arm is
    independently "responsive" with probability responsive_patient_fraction,
    drawing its active stay probabilities from the _g (good) or _b (bad)
    ranges accordingly. Uses the global numpy RNG.
    """
    T = np.zeros((N, 2, 2, 2))
    # Passive-action stay probabilities, drawn for all arms at once.
    p_pass_00 = np.random.uniform(low=range_pass_00[0], high=range_pass_00[1], size=N)
    p_pass_11 = np.random.uniform(low=range_pass_11[0], high=range_pass_11[1], size=N)
    # Active-action stay probabilities, per arm.
    p_act_00 = np.zeros(N)
    p_act_11 = np.zeros(N)
    for i in range(N):
        responsive = np.random.binomial(1, responsive_patient_fraction) == 1
        lo00, hi00 = range_act_g_00 if responsive else range_act_b_00
        lo11, hi11 = range_act_g_11 if responsive else range_act_b_11
        p_act_00[i] = np.random.uniform(low=lo00, high=hi00)
        p_act_11[i] = np.random.uniform(low=lo11, high=hi11)
    # Assemble row-stochastic 2x2 matrices per arm and action.
    for i in range(N):
        T[i, 0] = [[p_pass_00[i], 1 - p_pass_00[i]],
                   [1 - p_pass_11[i], p_pass_11[i]]]
        T[i, 1] = [[p_act_00[i], 1 - p_act_00[i]],
                   [1 - p_act_11[i], p_act_11[i]]]
    return T
# guaranteed to generate 'bad patients' according to the definition here:
# p_act01 < p01/(p01+p10) == bad
# as well as good patients according to the same.
# we only want to consider bottom chain bad patients because top chain bad patients
# would mean our action has negative effect on them which isn't realistic.
# but this gives bad separation from myopic
def generateTmatrixBadf(N, responsive_patient_fraction=0.4,
                        range_pass_00=(0.6,0.8), range_pass_11=(0.6,0.89),
                        range_act_g_00=(0,0.2),range_act_g_11=(0.9,1.0),
                        range_act_b_00=(0.7,0.9), range_act_b_11=(0.9,1.0)):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Like generateTmatrix, but the ranges are chosen so that "responsive"
    arms are guaranteed good and the rest guaranteed bad under the test
    p_act01 < p01/(p01+p10); each draw is asserted against that definition.

    Raises:
        ValueError: if a drawn arm violates its intended good/bad class.
    """
    T = np.zeros((N, 2, 2, 2))
    # Passive stay probabilities for all arms.
    p_pass_00 = np.random.uniform(low=range_pass_00[0], high=range_pass_00[1], size=N)
    p_pass_11 = np.random.uniform(low=range_pass_11[0], high=range_pass_11[1], size=N)
    p_act_00 = np.zeros(N)
    p_act_11 = np.zeros(N)
    for i in range(N):
        responsive = np.random.binomial(1, responsive_patient_fraction) == 1
        if responsive:
            # Patient responds well to call
            p_act_00[i] = np.random.uniform(low=range_act_g_00[0], high=range_act_g_00[1])
            p_act_11[i] = np.random.uniform(low=range_act_g_11[0], high=range_act_g_11[1])
        else:
            # Patient doesn't respond well to call
            p_act_00[i] = np.random.uniform(low=range_act_b_00[0], high=range_act_b_00[1])
            p_act_11[i] = np.random.uniform(low=range_act_b_11[0], high=range_act_b_11[1])
        # Verify the arm actually falls in the intended class.
        p_act01 = 1 - p_act_00[i]
        p01 = 1 - p_pass_00[i]
        p10 = 1 - p_pass_11[i]
        is_bad = p_act01 < p01 / (p01 + p10)
        if responsive and is_bad:
            raise ValueError("Intended good patient was bad.")
        if not responsive and not is_bad:
            raise ValueError("Intended bad patient was good.")
    for i in range(N):
        T[i, 0] = [[p_pass_00[i], 1 - p_pass_00[i]],
                   [1 - p_pass_11[i], p_pass_11[i]]]
        T[i, 1] = [[p_act_00[i], 1 - p_act_00[i]],
                   [1 - p_act_11[i], p_act_11[i]]]
    return T
# guaranteed to generate 'bad patients' according to the definition here:
# p_act01 < p01/(p01+p10) == bad
# as well as good patients according to the same.
# we only want to consider bottom chain bad patients because top chain bad patients
# would mean our action has negative effect on them which isn't realistic.
# but this gives bad separation from myopic
def generateTmatrixFullRandom(N,badf=0.2):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Each arm is flagged "bad" with probability badf, then Dirichlet rows are
    rejection-sampled until the candidate both passes verify_T_matrix and
    matches the flag under the test p_act01 < p01/(p01+p10).

    Fix: the original duplicated the entire classification body in both
    branches of the good/bad if/else; the branches differed only in the final
    boolean combination, which is now expressed once.

    Raises:
        ValueError: if the accepted matrix contradicts its flag (defensive;
        should be unreachable given the loop condition).
    """
    T = np.zeros((N, 2, 2, 2))
    for i in range(N):
        should_be_bad_patient = np.random.binomial(1, badf) == 1
        valid = False
        while not valid:
            # Each of the 4 rows is an independent Dirichlet([1,1]) draw.
            this_T = np.random.dirichlet([1, 1], size=(2, 2))
            p_act01 = this_T[1][0][1]
            p01 = this_T[0][0][1]
            p10 = this_T[0][1][0]
            is_bad_patient = p_act01 < p01 / (p01 + p10)
            is_valid_matrix = verify_T_matrix(this_T)
            # Accept only when badness matches the flag AND the matrix is valid.
            valid = (is_bad_patient == should_be_bad_patient) and is_valid_matrix
        # Defensive re-check of the accepted candidate.
        if should_be_bad_patient != (p_act01 < p01 / (p01 + p10)):
            raise ValueError("Mismatch")
        T[i] = this_T
    return T
def generateTmatrixNIBandIB(N,thresh_opt_frac=1, beta=0.5, quick_check=False):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    A random thresh_opt_frac fraction of arms must be threshold-optimal and
    the rest reverse-threshold-optimal (per isThresholdOptimal /
    isReverseThresholdOptimal from whittle); candidates are rejection-sampled
    from Dirichlet rows until they pass verify_T_matrix and the class test.
    NOTE(review): with thresh_opt_frac=None the class test is skipped, but
    the thres_opt_patients draw on the line below would fail on int(None*N)
    first — confirm None is never passed here.
    """
    T=np.zeros((N,2,2,2))
    thres_opt_patients=np.random.choice([i for i in range(N)],size=int(thresh_opt_frac*N), replace=False)
    for i in range(N):
        valid = False
        # Rejection-sample until a structurally valid matrix of the right
        # threshold class is found.
        while not valid:
            this_T = np.random.dirichlet([1,1],size=(2,2))
            valid = verify_T_matrix(this_T)
            if valid and thresh_opt_frac is not None:
                satisfies_condition=False
                if i in thres_opt_patients: # Threshold opt patient
                    satisfies_condition=isThresholdOptimal(this_T,beta, quick_check=quick_check)
                else: # Reverse Threshold opt patient
                    satisfies_condition=isReverseThresholdOptimal(this_T,beta, quick_check=quick_check)
                valid=satisfies_condition
        T[i] = this_T
    # print (T)
    # 1/0
    return T
def generateTmatrixNIBandIBFast(N):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Rejection-samples Dirichlet([1,1]) rows per arm until verify_T_matrix
    accepts the candidate; no threshold-optimality constraint is imposed.
    """
    T = np.zeros((N, 2, 2, 2))
    for arm in range(N):
        while True:
            candidate = np.random.dirichlet([1, 1], size=(2, 2))
            if verify_T_matrix(candidate):
                break
        T[arm] = candidate
    return T
# there are only 41 of 8350 cases where
# p11 < p10 results from not (p11=0.0 or p10=1.0)
def smooth_real_probs(T, epsilon):
    """
    Repair a 2x2 stay/leave matrix in place so that p11 >= p01, and return it.

    If T[1,1] < T[0,1] the two entries are made equal — pulled toward T[1,1]
    when T[0,1] >= 0.5, toward T[0,1] otherwise — and column 0 is recomputed
    as the complement. `epsilon` is currently unused by this function.
    """
    p01 = T[0, 1]
    p11 = T[1, 1]
    if p11 < p01:
        # Make p11 and p01 equal so action effects simulate properly:
        # degenerate rows like [0.0, 1.0] / [1.0, 0.0] otherwise break the
        # p11 >= p01 assumption used elsewhere.
        if p01 >= 0.5:
            T[0, 1] = p11
        else:
            T[1, 1] = p01
        T[0, 0] = 1 - T[0, 1]
        T[1, 0] = 1 - T[1, 1]
    return T
def generateTmatrixReal(N, file_root='.', responsive_patient_fraction=0.4, epsilon=0.005,
                        shift1=0,shift2=0,shift3=0,shift4=0, intervention_effect=0.05,
                        thresh_opt_frac=None, beta=0.5, quick_check=False):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Samples real patient stay probabilities (with replacement) from
    data/patient_T_matrices.npy, perturbs each with a random action benefit /
    passivity penalty of magnitude `intervention_effect`, and fills arm i only
    once the candidate passes verify_T_matrix and (when thresh_opt_frac is not
    None) the threshold-optimality class required for that arm.
    NOTE(review): shift1..shift4 are only used by the commented-out
    "perturbation experiment" block below — confirm they are meant to be kept.

    Raises:
        ValueError: if a perturbed candidate fails verify_T_matrix.
    """
    fname = os.path.join(file_root+'/data/', 'patient_T_matrices.npy')
    real = np.load(fname)
    T=np.zeros((N,2,2,2))
    #Passive action transition probabilities
    penalty_pass_00=0
    penalty_pass_11=0
    #Active action transition probabilities
    benefit_act_00=0
    benefit_act_11=0
    if thresh_opt_frac is None:
        # One real-patient index per arm, drawn up front.
        choices = np.random.choice(np.arange(real.shape[0]), N, replace=True)
    else:
        # Arms that must be threshold-optimal; the rest must be reverse.
        thres_opt_patients=np.random.choice([i for i in range(N)],size=int(thresh_opt_frac*N), replace=False)
    i=0
    # while (not for): arm i only advances when a candidate is accepted.
    while i < N:
        if thresh_opt_frac is None:
            choice = choices[i]
        else:
            # Re-draw a source patient on every rejection-sampling attempt.
            choice=np.random.choice(np.arange(real.shape[0]), 1, replace=True)[0]
        T_base = np.zeros((2,2))
        T_base[0,0] = real[choice][0]
        T_base[1,1] = real[choice][1]
        T_base[0,1] = 1 - T_base[0,0]
        T_base[1,0] = 1 - T_base[1,1]
        # Repair degenerate rows so p11 >= p01.
        T_base = smooth_real_probs(T_base, epsilon)
        shift = intervention_effect
        # Patient responds well to call
        benefit_act_00=np.random.uniform(low=0., high=shift) # will subtract from prob of staying 0,0
        benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift) # will add to prob of staying 1,1
        # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
        # Patient does well on their own, low penalty for not calling
        penalty_pass_11=np.random.uniform(low=0., high=shift) # will sub from prob of staying 1,1
        penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift) # will add to prob of staying 0,0
        '''
        For perturbation experiment only. TEMPORARY CODE below.
        '''
        """
        benefit_act_00=np.random.uniform(low=0., high=shift1) # will subtract from prob of staying 0,0
        benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift2) # will add to prob of staying 1,1
        # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
        # Patient does well on their own, low penalty for not calling
        penalty_pass_11=np.random.uniform(low=0., high=shift3) # will sub from prob of staying 1,1
        penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift4) # will add to prob of staying 0,0
        """
        T_pass = np.copy(T_base)
        T_act = np.copy(T_base)
        # Acting helps; not acting hurts. Clamp, then re-normalize rows.
        T_act[0,0] = max(0, T_act[0,0] - benefit_act_00)
        T_act[1,1] = min(1, T_act[1,1] + benefit_act_11)
        T_pass[0,0] = min(1, T_pass[0,0] + penalty_pass_00)
        T_pass[1,1] = max(0, T_pass[1,1] - penalty_pass_11)
        T_pass[0,1] = 1 - T_pass[0,0]
        T_pass[1,0] = 1 - T_pass[1,1]
        T_act[0,1] = 1 - T_act[0,0]
        T_act[1,0] = 1 - T_act[1,1]
        # Keep every probability in [epsilon, 1-epsilon].
        T_pass = epsilon_clip(T_pass, epsilon)
        T_act = epsilon_clip(T_act, epsilon)
        #print(T_pass)
        #print(T_act)
        #print()
        if not verify_T_matrix(np.array([T_pass, T_act])):
            print("T matrix invalid\n",np.array([T_pass, T_act]))
            raise ValueError()
        if thresh_opt_frac is None:
            satisfies_condition=True
        else:
            satisfies_condition=False
            if i in thres_opt_patients: # Threshold opt patient
                satisfies_condition=isThresholdOptimal([T_pass,T_act],beta, quick_check=quick_check)
            else: # Reverse Threshold opt patient
                satisfies_condition=isReverseThresholdOptimal([T_pass,T_act],beta, quick_check=quick_check)
        if satisfies_condition:
            # Accept the candidate and move on to the next arm.
            T[i,0]=T_pass
            T[i,1]=T_act
            i+=1
    return T
def generateTmatrixRealNoReplace(N, file_root='.', epsilon=0.005,
                                 shift1=0,shift2=0,shift3=0,shift4=0, intervention_effect=0.05):
    """
    Generates a Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
    action=0 denotes passive action, a=1 is active action
    State 0 denotes NA and state 1 denotes A

    Like generateTmatrixReal with thresh_opt_frac=None, but each arm gets a
    DISTINCT real patient (sampled without replacement), so N must not exceed
    the number of rows in data/patient_T_matrices.npy.
    NOTE(review): shift1..shift4 are only used by the commented-out
    "perturbation experiment" block below.

    Raises:
        ValueError: if a perturbed candidate fails verify_T_matrix.
    """
    fname = os.path.join(file_root, 'data/patient_T_matrices.npy')
    real = np.load(fname)
    T=np.zeros((N,2,2,2))
    #Passive action transition probabilities
    penalty_pass_00=0
    penalty_pass_11=0
    #Active action transition probabilities
    benefit_act_00=0
    benefit_act_11=0
    # Distinct source-patient index per arm.
    choices = np.random.choice(np.arange(real.shape[0]), N, replace=False)
    for i,choice in enumerate(choices):
        T_base = np.zeros((2,2))
        T_base[0,0] = real[choice][0]
        T_base[1,1] = real[choice][1]
        T_base[0,1] = 1 - T_base[0,0]
        T_base[1,0] = 1 - T_base[1,1]
        # Repair degenerate rows so p11 >= p01.
        T_base = smooth_real_probs(T_base, epsilon)
        shift = intervention_effect
        # Patient responds well to call
        benefit_act_00=np.random.uniform(low=0., high=shift) # will subtract from prob of staying 0,0
        benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift) # will add to prob of staying 1,1
        # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
        # Patient does well on their own, low penalty for not calling
        penalty_pass_11=np.random.uniform(low=0., high=shift) # will sub from prob of staying 1,1
        penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift) # will add to prob of staying 0,0
        '''
        For perturbation experiment only. TEMPORARY CODE below.
        '''
        """
        benefit_act_00=np.random.uniform(low=0., high=shift1) # will subtract from prob of staying 0,0
        benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift2) # will add to prob of staying 1,1
        # add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
        # Patient does well on their own, low penalty for not calling
        penalty_pass_11=np.random.uniform(low=0., high=shift3) # will sub from prob of staying 1,1
        penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift4) # will add to prob of staying 0,0
        """
        T_pass = np.copy(T_base)
        T_act = np.copy(T_base)
        # Acting helps; not acting hurts. Clamp, then re-normalize rows.
        T_act[0,0] = max(0, T_act[0,0] - benefit_act_00)
        T_act[1,1] = min(1, T_act[1,1] + benefit_act_11)
        T_pass[0,0] = min(1, T_pass[0,0] + penalty_pass_00)
        T_pass[1,1] = max(0, T_pass[1,1] - penalty_pass_11)
        T_pass[0,1] = 1 - T_pass[0,0]
        T_pass[1,0] = 1 - T_pass[1,1]
        T_act[0,1] = 1 - T_act[0,0]
        T_act[1,0] = 1 - T_act[1,1]
        # Keep every probability in [epsilon, 1-epsilon].
        T_pass = epsilon_clip(T_pass, epsilon)
        T_act = epsilon_clip(T_act, epsilon)
        #print(T_pass)
        #print(T_act)
        #print()
        if not verify_T_matrix(np.array([T_pass, T_act])):
            print("T matrix invalid\n",np.array([T_pass, T_act]))
            raise ValueError()
        T[i,0]=T_pass
        T[i,1]=T_act
    return T
| 31.01292
| 117
| 0.566406
| 3,915
| 24,004
| 3.29553
| 0.079183
| 0.010386
| 0.046504
| 0.050225
| 0.856301
| 0.83801
| 0.803984
| 0.788095
| 0.765928
| 0.760502
| 0
| 0.094573
| 0.299158
| 24,004
| 774
| 118
| 31.01292
| 0.672353
| 0.21988
| 0
| 0.643646
| 0
| 0
| 0.014351
| 0.004545
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033149
| false
| 0.162983
| 0.027624
| 0
| 0.10221
| 0.016575
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
bd644e7a52038cf0f6d73a0264382887adfa7285
| 10,119
|
py
|
Python
|
fonts/DejaVuSansMono_16.py
|
ironss/micropython-lib
|
61719636dad9aaa581c8e39e71ccc515e75c2d43
|
[
"MIT"
] | null | null | null |
fonts/DejaVuSansMono_16.py
|
ironss/micropython-lib
|
61719636dad9aaa581c8e39e71ccc515e75c2d43
|
[
"MIT"
] | null | null | null |
fonts/DejaVuSansMono_16.py
|
ironss/micropython-lib
|
61719636dad9aaa581c8e39e71ccc515e75c2d43
|
[
"MIT"
] | 2
|
2019-09-24T13:36:55.000Z
|
2020-04-18T02:05:38.000Z
|
# Code generated by font-to-py.py.
# Font: DejaVuSansMono.ttf
version = '0.26'


def height():
    """Pixel height of every glyph in this font."""
    return 16


def max_width():
    """Width in pixels of the widest glyph."""
    return 9


def hmap():
    """True if glyphs are horizontally mapped; this font is vertically mapped."""
    return False


def reverse():
    """True if bit order within glyph bytes is reversed; it is not here."""
    return False


def monospaced():
    """Whether the renderer should treat glyphs as monospaced (as generated)."""
    return False


def min_ch():
    """Lowest character code covered by the font."""
    return 32


def max_ch():
    """Highest character code covered by the font."""
    return 126
_font =\
b'\x09\x00\x04\x00\x02\x00\xc2\x0d\x42\x00\x22\x00\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xfe\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x1e\x00'\
b'\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x01\x10\x09\xd0\x07\x7c\x01\x16\x0d\xd0\x07\x7c\x01'\
b'\x16\x01\x10\x00\x09\x00\x38\x04\x4c\x08\x44\x08\xff\x3f\x84\x08'\
b'\x84\x08\x08\x07\x00\x00\x00\x00\x09\x00\x1c\x00\xa2\x00\xa2\x00'\
b'\x62\x00\x5c\x07\xc0\x08\xa0\x08\xa0\x08\x00\x07\x09\x00\xc0\x03'\
b'\x7c\x04\x32\x08\x62\x08\x82\x09\x02\x07\xc0\x09\x00\x00\x00\x00'\
b'\x09\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\xf0\x07\x0c\x18\x02\x20\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x02\x20\x0c\x18\xf0\x07'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x24\x00'\
b'\x28\x00\x18\x00\x7e\x00\x18\x00\x28\x00\x24\x00\x00\x00\x00\x00'\
b'\x09\x00\x80\x00\x80\x00\x80\x00\xf0\x07\x80\x00\x80\x00\x80\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x20\x00\x1c\x00\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x80\x00\x80\x00\x80\x00'\
b'\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x0c'\
b'\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x10\x00\x0c\x00\x03\xc0\x00\x30\x00\x0c\x00\x02\x00'\
b'\x00\x00\x00\x00\x09\x00\xf8\x03\x04\x04\x02\x08\x62\x08\x62\x08'\
b'\x04\x04\xf8\x03\x00\x00\x00\x00\x09\x00\x04\x08\x02\x08\xfe\x0f'\
b'\x00\x08\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x0c\x08'\
b'\x06\x0c\x02\x0a\x02\x09\x82\x08\x44\x08\x3c\x08\x00\x00\x00\x00'\
b'\x09\x00\x04\x04\x02\x08\x42\x08\x42\x08\x42\x08\xa4\x0c\xbc\x07'\
b'\x00\x00\x00\x00\x09\x00\x80\x01\x60\x01\x30\x01\x0c\x01\x06\x01'\
b'\xfe\x0f\x00\x01\x00\x00\x00\x00\x09\x00\x7e\x04\x22\x08\x22\x08'\
b'\x22\x08\x22\x08\x42\x04\x80\x03\x00\x00\x00\x00\x09\x00\xf0\x03'\
b'\x4c\x04\x26\x08\x22\x08\x22\x08\x62\x0c\xc4\x07\x00\x00\x00\x00'\
b'\x09\x00\x02\x00\x02\x08\x02\x06\x82\x01\x62\x00\x1e\x00\x06\x00'\
b'\x00\x00\x00\x00\x09\x00\xbc\x07\xa6\x0c\x42\x08\x42\x08\x42\x08'\
b'\xa6\x0c\xbc\x07\x00\x00\x00\x00\x09\x00\x7c\x04\xc6\x08\x82\x08'\
b'\x82\x08\x82\x0c\x44\x06\xf8\x01\x00\x00\x00\x00\x09\x00\x30\x0c'\
b'\x30\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x20\x30\x1c\x30\x0c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x80\x00\xc0\x01\x40\x01\x40\x01\x20\x02'\
b'\x20\x02\x20\x02\x10\x04\x00\x00\x09\x00\x20\x01\x20\x01\x20\x01'\
b'\x20\x01\x20\x01\x20\x01\x20\x01\x20\x01\x00\x00\x09\x00\x10\x04'\
b'\x20\x02\x20\x02\x20\x02\x40\x01\x40\x01\xc0\x01\x80\x00\x00\x00'\
b'\x09\x00\x04\x00\x02\x00\xc2\x0d\x42\x00\x22\x00\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\xe0\x07\x18\x18\x0c\x10\xc4\x23\x24\x24'\
b'\x2c\x24\xf8\x07\x00\x00\x00\x00\x09\x00\x00\x0c\xc0\x03\x3c\x01'\
b'\x02\x01\x3c\x01\xc0\x03\x00\x0c\x00\x00\x00\x00\x09\x00\xfe\x0f'\
b'\x42\x08\x42\x08\x42\x08\x42\x08\xe6\x0c\xbc\x07\x00\x00\x00\x00'\
b'\x09\x00\xf0\x01\x0c\x06\x02\x08\x02\x08\x02\x08\x02\x08\x04\x04'\
b'\x00\x00\x00\x00\x09\x00\xfe\x0f\x02\x08\x02\x08\x02\x08\x06\x0c'\
b'\x0c\x06\xf0\x01\x00\x00\x00\x00\x09\x00\xfe\x0f\x42\x08\x42\x08'\
b'\x42\x08\x42\x08\x42\x08\x42\x08\x00\x00\x00\x00\x09\x00\xfe\x0f'\
b'\x42\x00\x42\x00\x42\x00\x42\x00\x42\x00\x42\x00\x00\x00\x00\x00'\
b'\x09\x00\xf0\x01\x0c\x06\x02\x08\x02\x08\x42\x08\x42\x08\xc4\x07'\
b'\x00\x00\x00\x00\x09\x00\xfe\x0f\x40\x00\x40\x00\x40\x00\x40\x00'\
b'\x40\x00\xfe\x0f\x00\x00\x00\x00\x09\x00\x02\x08\x02\x08\xfe\x0f'\
b'\x02\x08\x02\x08\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x04'\
b'\x00\x08\x02\x08\x02\x08\x02\x0c\xfe\x07\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\xfe\x0f\x40\x00\x20\x00\xd0\x00\x08\x01\x04\x06\x02\x08'\
b'\x00\x00\x00\x00\x09\x00\xfe\x0f\x00\x08\x00\x08\x00\x08\x00\x08'\
b'\x00\x08\x00\x08\x00\x00\x00\x00\x09\x00\xfe\x0f\x0e\x00\x70\x00'\
b'\x80\x00\x70\x00\x0e\x00\xfe\x0f\x00\x00\x00\x00\x09\x00\xfe\x0f'\
b'\x06\x00\x38\x00\xe0\x00\x00\x03\x00\x0c\xfe\x0f\x00\x00\x00\x00'\
b'\x09\x00\xf8\x03\x04\x04\x02\x08\x02\x08\x02\x08\x04\x04\xf8\x03'\
b'\x00\x00\x00\x00\x09\x00\xfe\x0f\x82\x00\x82\x00\x82\x00\x82\x00'\
b'\xc4\x00\x7c\x00\x00\x00\x00\x00\x09\x00\xf8\x03\x04\x04\x02\x08'\
b'\x02\x08\x02\x18\x04\x3c\xf8\x03\x00\x00\x00\x00\x09\x00\xfe\x0f'\
b'\x42\x00\x42\x00\x42\x00\x42\x00\xa6\x00\x3c\x07\x00\x08\x00\x00'\
b'\x09\x00\x3c\x04\x24\x0c\x42\x08\x42\x08\x42\x08\x86\x0c\x84\x07'\
b'\x00\x00\x00\x00\x09\x00\x02\x00\x02\x00\x02\x00\xfe\x0f\x02\x00'\
b'\x02\x00\x02\x00\x00\x00\x00\x00\x09\x00\xfe\x07\x00\x0c\x00\x08'\
b'\x00\x08\x00\x08\x00\x0c\xfe\x07\x00\x00\x00\x00\x09\x00\x06\x00'\
b'\x78\x00\x80\x07\x00\x08\x80\x07\x78\x00\x06\x00\x00\x00\x00\x00'\
b'\x09\x00\x0e\x00\xf0\x03\x00\x0c\xe0\x03\x10\x00\xe0\x03\x00\x0c'\
b'\xf0\x03\x0e\x00\x09\x00\x02\x08\x0c\x06\xb0\x01\x40\x00\xb0\x01'\
b'\x0c\x06\x02\x08\x00\x00\x00\x00\x09\x00\x02\x00\x06\x00\x18\x00'\
b'\x30\x00\xc0\x0f\x20\x00\x18\x00\x06\x00\x02\x00\x09\x00\x02\x0c'\
b'\x02\x0a\x82\x09\x42\x08\x32\x08\x0a\x08\x06\x08\x00\x00\x00\x00'\
b'\x09\x00\xfe\x3f\x02\x20\x02\x20\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x02\x00\x0c\x00\x30\x00\xc0\x00\x00\x03'\
b'\x00\x0c\x00\x10\x00\x00\x00\x00\x09\x00\x02\x20\x02\x20\xfe\x3f'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x10\x00'\
b'\x18\x00\x0c\x00\x06\x00\x06\x00\x0c\x00\x18\x00\x10\x00\x00\x00'\
b'\x09\x00\x00\x10\x00\x10\x00\x10\x00\x10\x00\x10\x00\x10\x00\x10'\
b'\x00\x10\x00\x10\x09\x00\x01\x00\x03\x00\x06\x00\x04\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x07\xa0\x09\x90\x08'\
b'\x90\x08\x90\x08\x90\x04\xe0\x0f\x00\x00\x00\x00\x09\x00\xfe\x0f'\
b'\x20\x04\x10\x08\x10\x08\x10\x08\x20\x04\xc0\x03\x00\x00\x00\x00'\
b'\x09\x00\xc0\x03\x20\x04\x10\x08\x10\x08\x10\x08\x20\x04\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\xc0\x03\x20\x04\x10\x08\x10\x08\x10\x08'\
b'\x20\x04\xfe\x0f\x00\x00\x00\x00\x09\x00\xc0\x03\x20\x05\x10\x09'\
b'\x10\x09\x10\x09\x20\x09\xc0\x05\x00\x00\x00\x00\x09\x00\x10\x00'\
b'\x10\x00\xfc\x0f\x12\x00\x12\x00\x12\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\xc0\x03\x20\x24\x10\x48\x10\x48\x10\x48\x20\x64\xf0\x3f'\
b'\x00\x00\x00\x00\x09\x00\xfe\x0f\x20\x00\x10\x00\x10\x00\x10\x00'\
b'\x30\x00\xe0\x0f\x00\x00\x00\x00\x09\x00\x00\x08\x10\x08\x10\x08'\
b'\xf6\x0f\x00\x08\x00\x08\x00\x08\x00\x00\x00\x00\x09\x00\x00\x40'\
b'\x10\x40\x10\x40\xf6\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\xfe\x0f\x80\x00\xc0\x00\x20\x01\x20\x02\x10\x04\x00\x08'\
b'\x00\x00\x00\x00\x09\x00\x02\x00\x02\x00\x02\x00\xfe\x07\x00\x08'\
b'\x00\x08\x00\x08\x00\x00\x00\x00\x09\x00\xf0\x0f\x10\x00\x10\x00'\
b'\xf0\x0f\x10\x00\x10\x00\xe0\x0f\x00\x00\x00\x00\x09\x00\xf0\x0f'\
b'\x20\x00\x10\x00\x10\x00\x10\x00\x30\x00\xe0\x0f\x00\x00\x00\x00'\
b'\x09\x00\xc0\x03\x20\x04\x10\x08\x10\x08\x10\x08\x20\x04\xc0\x03'\
b'\x00\x00\x00\x00\x09\x00\xf0\x7f\x20\x04\x10\x08\x10\x08\x10\x08'\
b'\x20\x04\xc0\x03\x00\x00\x00\x00\x09\x00\xc0\x03\x20\x04\x10\x08'\
b'\x10\x08\x10\x08\x20\x04\xf0\x7f\x00\x00\x00\x00\x09\x00\xf0\x0f'\
b'\x20\x00\x10\x00\x10\x00\x10\x00\x20\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\xe0\x04\x90\x08\x90\x08\x90\x08\x10\x09\x10\x09\x20\x07'\
b'\x00\x00\x00\x00\x09\x00\x10\x00\x10\x00\xfc\x07\x10\x08\x10\x08'\
b'\x10\x08\x00\x00\x00\x00\x00\x00\x09\x00\xf0\x07\x00\x0c\x00\x08'\
b'\x00\x08\x00\x08\x00\x04\xf0\x0f\x00\x00\x00\x00\x09\x00\x10\x00'\
b'\xe0\x00\x00\x07\x00\x08\x00\x07\xe0\x00\x10\x00\x00\x00\x00\x00'\
b'\x09\x00\x30\x00\xc0\x03\x00\x0c\x00\x03\xc0\x00\x00\x03\x00\x0c'\
b'\xc0\x03\x30\x00\x09\x00\x10\x08\x30\x0c\x40\x02\x80\x01\x40\x02'\
b'\x30\x0c\x10\x08\x00\x00\x00\x00\x09\x00\x10\x00\xe0\x40\x00\x43'\
b'\x00\x3c\x00\x07\xe0\x00\x10\x00\x00\x00\x00\x00\x09\x00\x10\x0c'\
b'\x10\x0a\x10\x09\x10\x09\x90\x08\x50\x08\x30\x08\x00\x00\x00\x00'\
b'\x09\x00\x80\x00\x80\x00\x7c\x3f\x02\x40\x02\x40\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\xfe\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x02\x40\x02\x40\x7c\x3f'\
b'\x80\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x80\x00'\
b'\x40\x00\x40\x00\x40\x00\x80\x00\x80\x00\x80\x00\x40\x00\x00\x00'\
_index =\
b'\x00\x00\x14\x00\x28\x00\x3c\x00\x50\x00\x64\x00\x78\x00\x8c\x00'\
b'\xa0\x00\xb4\x00\xc8\x00\xdc\x00\xf0\x00\x04\x01\x18\x01\x2c\x01'\
b'\x40\x01\x54\x01\x68\x01\x7c\x01\x90\x01\xa4\x01\xb8\x01\xcc\x01'\
b'\xe0\x01\xf4\x01\x08\x02\x1c\x02\x30\x02\x44\x02\x58\x02\x6c\x02'\
b'\x80\x02\x94\x02\xa8\x02\xbc\x02\xd0\x02\xe4\x02\xf8\x02\x0c\x03'\
b'\x20\x03\x34\x03\x48\x03\x5c\x03\x70\x03\x84\x03\x98\x03\xac\x03'\
b'\xc0\x03\xd4\x03\xe8\x03\xfc\x03\x10\x04\x24\x04\x38\x04\x4c\x04'\
b'\x60\x04\x74\x04\x88\x04\x9c\x04\xb0\x04\xc4\x04\xd8\x04\xec\x04'\
b'\x00\x05\x14\x05\x28\x05\x3c\x05\x50\x05\x64\x05\x78\x05\x8c\x05'\
b'\xa0\x05\xb4\x05\xc8\x05\xdc\x05\xf0\x05\x04\x06\x18\x06\x2c\x06'\
b'\x40\x06\x54\x06\x68\x06\x7c\x06\x90\x06\xa4\x06\xb8\x06\xcc\x06'\
b'\xe0\x06\xf4\x06\x08\x07\x1c\x07\x30\x07\x44\x07\x58\x07\x6c\x07'\
b'\x80\x07'
_mvfont = memoryview(_font)
def _chr_addr(ordch):
    """Byte offset of glyph *ordch* in _font (little-endian u16 per char, base 32)."""
    idx = (ordch - 32) * 2
    return int.from_bytes(_index[idx:idx + 2], 'little')
def get_width(s):
    """Total rendered width in pixels of the string *s*."""
    total = 0
    for ch in s:
        code = ord(ch)
        # NOTE(review): in-range codes are shifted by +1 before the index
        # lookup (mirrors get_ch); out-of-range codes fall back to 32 —
        # looks like a font-to-py generator quirk, verify against its output.
        code = code + 1 if 32 <= code <= 126 else 32
        offset = _chr_addr(code)
        total += int.from_bytes(_font[offset:offset + 2], 'little')
    return total
def get_ch(ch):
    """Return (memoryview of glyph data, width in pixels) for character *ch*."""
    code = ord(ch)
    # Out-of-range characters render as the glyph stored at index 32.
    code = code + 1 if 32 <= code <= 126 else 32
    start = _chr_addr(code)
    width = int.from_bytes(_font[start:start + 2], 'little')
    end = _chr_addr(code + 1)
    return _mvfont[start + 2:end], width
| 54.403226
| 68
| 0.701453
| 2,396
| 10,119
| 2.951586
| 0.06177
| 0.386878
| 0.427602
| 0.400452
| 0.634615
| 0.599972
| 0.553733
| 0.509333
| 0.414027
| 0.357466
| 0
| 0.401973
| 0.038245
| 10,119
| 185
| 69
| 54.697297
| 0.324702
| 0.005633
| 0
| 0.065089
| 1
| 0.781065
| 0.842911
| 0.839928
| 0
| 1
| 0
| 0
| 0
| 1
| 0.059172
| false
| 0
| 0
| 0.04142
| 0.118343
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
bdd4d6f2788703591e8228c84761525ba7fb6ecc
| 9,020
|
py
|
Python
|
tests/test_complex.py
|
gasparka/pyha
|
60d9bbfd6075e7548d670d05317d64bc2a1a19ee
|
[
"Apache-2.0"
] | 6
|
2017-05-18T18:57:07.000Z
|
2020-08-06T11:23:34.000Z
|
tests/test_complex.py
|
gasparka/pyha
|
60d9bbfd6075e7548d670d05317d64bc2a1a19ee
|
[
"Apache-2.0"
] | 607
|
2017-05-10T12:51:54.000Z
|
2022-03-31T18:08:15.000Z
|
tests/test_complex.py
|
gasparka/pyha
|
60d9bbfd6075e7548d670d05317d64bc2a1a19ee
|
[
"Apache-2.0"
] | 1
|
2019-03-20T13:57:46.000Z
|
2019-03-20T13:57:46.000Z
|
from pyha import Hardware, sims_close, Complex, hardware_sims_equal, scalb, simulate
import numpy as np
from pyha.common.shift_register import ShiftRegister
def test_loopback():
    """A pass-through design must survive HARDWARE and RTL simulation unchanged."""
    class T(Hardware):
        def main(self, x):
            return x

    stimulus = np.random.uniform(-1, 1, 2) + np.random.uniform(-1, 1, 2) * 1j
    sims = simulate(T(), stimulus, simulations=['HARDWARE', 'RTL'])
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_register():
    """A single Complex register delays the stream by one sample (DELAY = 1)."""
    class T(Hardware):
        def __init__(self):
            self.DELAY = 1
            self.reg = Complex()  # TODO: this should resize to 0, -17??

        def main(self, x):
            self.reg = x
            return self.reg

    stimulus = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(T(), stimulus)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_loopback_negative_left():
    """Loopback through a Complex type with a negative left (integer) bound."""
    class T(Hardware):
        def main(self, x):
            return x

    stimulus = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    # Scale down so the samples fit the narrowed Complex(0, -3, -20) range.
    stimulus *= 0.05
    sims = simulate(T(), stimulus, input_types=[Complex(0, -3, -20)], conversion_path='/tmp/pyha_output')
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_old_shiftreg():
    """16-deep shift register built from a plain Python list of Complex."""
    class T(Hardware):
        def __init__(self):
            self.reg = [Complex() for _ in range(16)]
            self.DELAY = 1

        def main(self, x):
            self.reg = [x] + self.reg[:-1]
            return self.reg[-1]

    stimulus = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(T(), stimulus, simulations=['HARDWARE', 'RTL', 'NETLIST'])
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_new_shiftreg():
    """16-deep shift register using the dedicated ShiftRegister helper."""
    class T(Hardware):
        def __init__(self):
            self.reg = ShiftRegister([Complex() for _ in range(16)])
            self.DELAY = 1

        def main(self, x):
            self.reg.push_next(x)
            return self.reg.peek()

    device = T()
    re = np.random.uniform(-1, 1, 256)
    im = np.random.uniform(-1, 1, 256)
    sims = simulate(device, re + im * 1j,
                    simulations=['HARDWARE', 'RTL', 'NETLIST'])
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_multiply():
    """Elementwise Complex * Complex."""
    class T(Hardware):
        def main(self, a, b):
            return a * b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_multiply_negative_left():
    """Multiply with small amplitudes and a negative-left-bound input type."""
    class T(Hardware):
        def main(self, a, b):
            return a * b

    device = T()
    x = np.random.uniform(-0.01, 0.01, 256) + np.random.uniform(-0.01, 0.01, 256) * 1j
    y = np.random.uniform(-0.01, 0.01, 256) + np.random.uniform(-0.01, 0.01, 256) * 1j
    sims = simulate(device, x, y,
                    input_types=[Complex(0, -3, -20), Complex(0, -3, -20)],
                    conversion_path='/tmp/pyha_output/src')
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_add():
    """Elementwise Complex + Complex."""
    class T(Hardware):
        def main(self, a, b):
            return a + b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_sub():
    """Elementwise Complex - Complex."""
    class T(Hardware):
        def main(self, a, b):
            return a - b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_rshift():
    """Complex right-shifted by a per-sample integer amount."""
    class T(Hardware):
        def main(self, a, b):
            return a >> b

    device = T()
    samples = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    shifts = np.random.randint(0, 17, 256)
    sims = simulate(device, samples, shifts)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_lshift():
    """Complex left-shifted by a per-sample integer amount."""
    class T(Hardware):
        def main(self, a, b):
            return a << b

    device = T()
    samples = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    shifts = np.random.randint(0, 17, 256)
    sims = simulate(device, samples, shifts)
    assert hardware_sims_equal(sims)
    # NOTE(review): closeness is deliberately not asserted here — presumably
    # because left shift can overflow/wrap — confirm before re-enabling:
    # assert sims_close(sims)
def test_scalb():
    """scalb() by compile-time constants -1, 0 and 1 on a fixed sample."""
    class T(Hardware):
        def __init__(self, scalbi):
            self.SCALB_I = scalbi

        def main(self, a):
            # ret = scalb(a, b)
            return scalb(a, self.SCALB_I)

    stimulus = [0.125 + 0.25j]
    # Same three cases as before (-1, 0, 1), exercised in the same order.
    for shift in (-1, 0, 1):
        sims = simulate(T(shift), stimulus)
        assert hardware_sims_equal(sims)
        assert sims_close(sims)
def test_scalb_bug():
    """ Result with negative integer bits were mishandled.. """
    # TODO: probably not fully resolved...
    class T(Hardware):
        def __init__(self, scalbi):
            self.SCALB_I = scalbi

        def main(self, a):
            scaled = scalb(a, self.SCALB_I)
            return scaled

    device = T(-1)
    stimulus = [0.125 + 0.25j]
    sims = simulate(device, stimulus)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_part_access():
    """Accessing .real and .imag of a Complex input."""
    class T(Hardware):
        def main(self, a):
            return a.real, a.imag

    device = T()
    samples = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(device, samples)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_part_access_submod():
    """ Bug: 'a.elem' was merged to 'aelem', see https://github.com/PyCQA/redbaron/issues/161 """
    class A(Hardware):
        def __init__(self, elem):
            self.elem = elem

    class T(Hardware):
        def main(self, a):
            return a.elem.real, a.elem.imag

    device = T()
    raw = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    wrapped = [A(sample) for sample in raw]
    sims = simulate(device, wrapped)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_add_float():
    """Complex + real float, elementwise."""
    class T(Hardware):
        def main(self, a, b):
            return a + b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256)
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_sub_float():
    """Complex - real float, elementwise."""
    class T(Hardware):
        def main(self, a, b):
            return a - b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256)
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_sub_uneven_types():
    """ Failed if a and b were different sizes, bug was in minimum function, that acted as maximum """
    class T(Hardware):
        def main(self, a, b):
            return a - b

    dut = T()
    a = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    b = np.random.uniform(-1, 1, 256)
    # Fix: conversion_path previously pointed at a developer-specific home
    # directory ('/home/gaspar/git/pyhacores/playground'), which fails on any
    # other machine; use the shared /tmp path like the rest of this module.
    sims = simulate(dut, a, b,
                    input_types=[Complex(0, 0, -17), Complex(0, 0, -18)],
                    simulations=['HARDWARE', 'RTL'],
                    conversion_path='/tmp/pyha_output')
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_mult_float():
    """Complex * real float, elementwise."""
    class T(Hardware):
        def main(self, a, b):
            return a * b

    device = T()
    x = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    y = np.random.uniform(-1, 1, 256)
    sims = simulate(device, x, y)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_floatconst_operations():
    """Add/sub/mul against float literal constants."""
    class T(Hardware):
        def main(self, a):
            q = a + 0.24
            w = a - 0.2
            e = a * 0.4
            return q, w, e

    device = T()
    samples = np.random.uniform(-1, 1, 256) + np.random.uniform(-1, 1, 256) * 1j
    sims = simulate(device, samples)
    assert hardware_sims_equal(sims)
    assert sims_close(sims)
def test_complex_constants():
    """Complex literal constants plus a registered feedback expression."""
    class T(Hardware):
        def __init__(self):
            self.DELAY = 1
            self.reg = Complex(0, 0, -17)
            self.reg2 = Complex(0, 0, -17)
            self.reg3 = Complex(0, 0, -17)

        def main(self, x):
            self.reg = self.reg + x - (x * x)  # this was incorrectly parsed as complex constant!
            self.reg2 = 0.0 + 0.5j
            self.reg3 = 0.0 + 0.5 * 1j  # assigned but not returned; exercises conversion only
            return self.reg, self.reg2

    dut = T()
    inputs = [0 + 0j, 0.1 + 0.2j, -0.1 + 0.3j]
    # Fix: conversion_path previously pointed at a developer-specific home
    # directory ('/home/gaspar/git/pyhacores/playground'), which fails on any
    # other machine; use the shared /tmp path like the rest of this module.
    sims = simulate(dut, inputs, simulations=['HARDWARE', 'RTL'],
                    conversion_path='/tmp/pyha_output')
    assert sims_close(sims)
| 26.144928
| 126
| 0.575721
| 1,345
| 9,020
| 3.747212
| 0.111524
| 0.079365
| 0.142857
| 0.139683
| 0.828968
| 0.819246
| 0.812698
| 0.807143
| 0.785317
| 0.730754
| 0
| 0.05943
| 0.279933
| 9,020
| 344
| 127
| 26.22093
| 0.716551
| 0.044013
| 0
| 0.720165
| 0
| 0
| 0.020814
| 0.008605
| 0
| 0
| 0
| 0.002907
| 0.18107
| 1
| 0.201646
| false
| 0
| 0.012346
| 0.061728
| 0.390947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
da0fb74ade327eeeca4e5304acee9f105a33a3d5
| 14,912
|
py
|
Python
|
devel/lib/python2.7/dist-packages/interbotix_xs_sdk/srv/_OperatingModes.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
devel/lib/python2.7/dist-packages/interbotix_xs_sdk/srv/_OperatingModes.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
devel/lib/python2.7/dist-packages/interbotix_xs_sdk/srv/_OperatingModes.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from interbotix_xs_sdk/OperatingModesRequest.msg. Do not edit."""
import codecs
import sys
# True when running under Python 3+; the comparison already yields a bool, so
# the redundant `True if ... else False` wrapper is dropped.
python3 = sys.hexversion > 0x03000000
import genpy
import struct
# NOTE(review): this file is autogenerated by genpy ("Do not edit" per the
# header). The comments below are review annotations only — regenerate from
# the .srv definition rather than hand-editing the logic.
class OperatingModesRequest(genpy.Message):
  # Type identity used by the ROS middleware for compatibility checks.
  _md5sum = "cb68bef3d517c840b0a5cc0f73d64e36"
  _type = "interbotix_xs_sdk/OperatingModesRequest"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """# Set Operating Modes
#
# To get familiar with the various operating modes, go to...
# http://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_workbench/
# ...click on a motor model, and scroll down to the 'Operating Mode' section.
#
# There are 6 valid operating modes. They are...
# "position" - allows up to 1 complete joint revolution (perfect for arm joints); units are in radians
# "ext_position" - allows up to 512 joint revolutions; units are in radians
# "velocity" - allows infinite number of rotations (perfect for wheeled robots); units are in rad/s
# "current" - allows infinite number of rotations (perfect for grippers); units are in milliamps
# "current_based_position" - allows up to 512 joint revolutions; units are in radians
# "pwm" - allows infinite number of rotations (perfect for grippers); units are in PWM
#
# Note that the interbotix_xs_sdk offers one other 'pseudo' operating mode that can be useful in controlling Interbotix Grippers - called "linear_position".
# Behind the scenes, it uses the "position" operating mode mentioned above. The main difference is that with this mode, a desired linear distance [m]
# between the two gripper fingers can be commanded. In the "position" mode though, only the angular position of the motor can be commanded.
#
# There are 2 valid profile types - either 'time' or 'velocity'. Depending on which is chosen, the following parameters behave differently.
#
# 1) profile_velocity: acts as a pass-through to the Profile_Velocity register and operates in one of two ways. If
# 'profile_type' is set to 'velocity', this parameter describes the max velocity limit for the specified joint(s);
# for example, if doing 'position' control, setting this to '131' would be equivalent to a limit of 3.14 rad/s; if
# 'profile_type' is set to 'time', this parameter sets the time span (in milliseconds) that it should take for the
# specified joint(s) to move; to have an 'infinite' max limit, set to '0'.
#
# 2) profile_acceleration: acts as a pass-through to the Profile_Acceleration register and operates in one of two ways. If
# 'profile_type' is set to 'velocity', this parameter describes the max acceleration limit for the specified joint(s);
# for example, if doing 'position' or 'velocity' control, setting this to '15' would be equivalent to a limit of 5.6 rad/s^2;
# if 'profile_type' is set to 'time', this parameter sets the time span (in milliseconds) that it should take for the
# specified joint(s) to accelerate; to have an 'infinite' max limit, set to '0'.
string cmd_type # set to 'group' if commanding a joint group or 'single' if commanding a single joint
string name # name of the group if commanding a joint group or joint if commanding a single joint
string mode # desired operating mode as described above
string profile_type # desired 'profile' type - either 'time' or 'velocity' as described above
int32 profile_velocity # desired velocity profile as explained above - only used in 'position' or the 'ext_position' control modes
int32 profile_acceleration # desired acceleration profile as explained above - used in all modes except for 'current' and 'pwm' control
"""
  # Field names and their ROS types, in wire order.
  __slots__ = ['cmd_type','name','mode','profile_type','profile_velocity','profile_acceleration']
  _slot_types = ['string','string','string','string','int32','int32']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       cmd_type,name,mode,profile_type,profile_velocity,profile_acceleration

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(OperatingModesRequest, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.cmd_type is None:
        self.cmd_type = ''
      if self.name is None:
        self.name = ''
      if self.mode is None:
        self.mode = ''
      if self.profile_type is None:
        self.profile_type = ''
      if self.profile_velocity is None:
        self.profile_velocity = 0
      if self.profile_acceleration is None:
        self.profile_acceleration = 0
    else:
      # No arguments at all: initialize every field to its type default.
      self.cmd_type = ''
      self.name = ''
      self.mode = ''
      self.profile_type = ''
      self.profile_velocity = 0
      self.profile_acceleration = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # Each string field is written as a uint32 length prefix followed by the
      # UTF-8 bytes. The `python3 or type(_x) == unicode` guard short-circuits
      # on Python 3, so the Python-2-only name `unicode` is never evaluated there.
      _x = self.cmd_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.mode
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.profile_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      # The trailing pair of int32 fields is packed with a cached Struct.
      _x = self
      buff.write(_get_struct_2i().pack(_x.profile_velocity, _x.profile_acceleration))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE(review): parameter name `str` shadows the builtin (genpy convention).
    if python3:
      # presumably attaches this message type to the registered 'rosmsg'
      # decode-error handler for better error reporting — TODO confirm
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      # cmd_type: uint32 length prefix, then that many raw bytes
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.cmd_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.cmd_type = str[start:end]
      # name
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.name = str[start:end]
      # mode
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.mode = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.mode = str[start:end]
      # profile_type
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.profile_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.profile_type = str[start:end]
      # trailing pair of int32s: profile_velocity, profile_acceleration
      _x = self
      start = end
      end += 8
      (_x.profile_velocity, _x.profile_acceleration,) = _get_struct_2i().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize(); `numpy` is never referenced because this
    # message has no array fields.
    try:
      _x = self.cmd_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.mode
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.profile_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_2i().pack(_x.profile_velocity, _x.profile_acceleration))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Identical to deserialize(); `numpy` is never referenced because this
    # message has no array fields.
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.cmd_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.cmd_type = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.mode = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.mode = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.profile_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.profile_type = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.profile_velocity, _x.profile_acceleration,) = _get_struct_2i().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared cached Struct for a single little-endian uint32 ('<I'), provided by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor used by the generated (de)serializers above.
    global _struct_I
    return _struct_I
_struct_2i = None
def _get_struct_2i():
    """Return the lazily-created, cached ``struct.Struct`` for two little-endian int32s."""
    global _struct_2i
    cached = _struct_2i
    if cached is None:
        cached = _struct_2i = struct.Struct("<2i")
    return cached
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from interbotix_xs_sdk/OperatingModesResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
# NOTE(review): autogenerated by genpy ("Do not edit" per the header); comments
# are review annotations only. This response message carries no fields, so the
# (de)serializers are intentionally empty.
class OperatingModesResponse(genpy.Message):
  # d41d8... is the md5 of the (empty) response definition below.
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "interbotix_xs_sdk/OperatingModesResponse"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """
"""
  # No fields: empty slot lists.
  __slots__ = []
  _slot_types = []

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(OperatingModesResponse, self).__init__(*args, **kwds)

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      pass  # nothing to write: the message has no fields
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0  # nothing to read: the message has no fields
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      pass  # nothing to write: the message has no fields
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0  # nothing to read: the message has no fields
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Duplicate of the Request section's helper — this file concatenates the two
# generated message modules, each with its own cached '<I' Struct accessor.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
class OperatingModes(object):
    # Service descriptor tying the request/response classes together for the
    # ROS service machinery; _md5sum matches OperatingModesRequest._md5sum above.
    _type = 'interbotix_xs_sdk/OperatingModes'
    _md5sum = 'cb68bef3d517c840b0a5cc0f73d64e36'
    _request_class = OperatingModesRequest
    _response_class = OperatingModesResponse
| 38.5323
| 156
| 0.65397
| 2,056
| 14,912
| 4.600681
| 0.142996
| 0.037213
| 0.030236
| 0.023681
| 0.748599
| 0.748599
| 0.742256
| 0.72587
| 0.719526
| 0.713183
| 0
| 0.015663
| 0.233637
| 14,912
| 386
| 157
| 38.632124
| 0.812041
| 0.161548
| 0
| 0.770548
| 1
| 0.065068
| 0.332259
| 0.020818
| 0
| 0
| 0.001652
| 0
| 0
| 1
| 0.05137
| false
| 0.013699
| 0.027397
| 0
| 0.174658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e519899586bbce4511a7ae0a743f8707d1f98f97
| 17,541
|
py
|
Python
|
idaes/generic_models/unit_models/column_models/tests/test_solvent_condenser.py
|
michaelbynum/idaes-pse
|
b9c7bc21d0d411657cbe448c40afdc96c41e3465
|
[
"RSA-MD"
] | 1
|
2019-02-21T22:03:48.000Z
|
2019-02-21T22:03:48.000Z
|
idaes/generic_models/unit_models/column_models/tests/test_solvent_condenser.py
|
michaelbynum/idaes-pse
|
b9c7bc21d0d411657cbe448c40afdc96c41e3465
|
[
"RSA-MD"
] | 1
|
2021-02-27T00:40:54.000Z
|
2021-03-01T13:51:55.000Z
|
idaes/generic_models/unit_models/column_models/tests/test_solvent_condenser.py
|
michaelbynum/idaes-pse
|
b9c7bc21d0d411657cbe448c40afdc96c41e3465
|
[
"RSA-MD"
] | 1
|
2021-09-10T16:00:58.000Z
|
2021-09-10T16:00:58.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for solvent condenser unit model.
Authors: Andrew Lee
"""
import pytest
from pyomo.environ import (ConcreteModel,
Constraint,
Param,
TerminationCondition,
SolverStatus,
units,
value)
from pyomo.util.check_units import (assert_units_consistent,
assert_units_equivalent)
from idaes.core import FlowsheetBlock
from idaes.generic_models.properties.core.generic.generic_property import (
GenericParameterBlock)
from idaes.core.util.model_statistics import (degrees_of_freedom,
number_variables,
number_total_constraints,
number_unused_variables)
from idaes.core.util.testing import initialization_tester
from idaes.core.util import get_solver, scaling as iscale
from idaes.generic_models.unit_models.column_models.solvent_condenser import (
SolventCondenser)
from idaes.power_generation.carbon_capture.mea_solvent_system.properties.MEA_solvent \
import configuration as aqueous_mea
from idaes.power_generation.carbon_capture.mea_solvent_system.properties.MEA_vapor \
import wet_co2
# -----------------------------------------------------------------------------
# Get default solver for testing
# Module-level so the `skipif(solver is None, ...)` marks below can inspect it.
solver = get_solver()
# -----------------------------------------------------------------------------
class TestStripperVaporFlow(object):
    """Solvent condenser tests with the reflux (liquid) molar flow fixed.

    The fixture builds one flowsheet per class; the remaining tests check
    structure, units, degrees of freedom, solve status, the regression
    solution values and material/energy conservation.
    """

    @pytest.fixture(scope="class")
    def model(self):
        # Build the flowsheet: MEA liquid phase, wet-CO2 vapor phase.
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})

        m.fs.liquid_properties = GenericParameterBlock(default=aqueous_mea)
        m.fs.vapor_properties = GenericParameterBlock(default=wet_co2)

        m.fs.unit = SolventCondenser(default={
            "liquid_property_package": m.fs.liquid_properties,
            "vapor_property_package": m.fs.vapor_properties})

        # Fully specify the vapor inlet stream.
        m.fs.unit.inlet.flow_mol[0].fix(1.1117)
        m.fs.unit.inlet.temperature[0].fix(339.33)
        m.fs.unit.inlet.pressure[0].fix(184360)
        m.fs.unit.inlet.mole_frac_comp[0, "CO2"].fix(0.8817)
        m.fs.unit.inlet.mole_frac_comp[0, "H2O"].fix(0.1183)

        # This class's specification: fix the reflux flow (vs. fixing heat
        # duty in the companion test class below).
        m.fs.unit.reflux.flow_mol[0].fix(0.1083)

        # Manual scaling for the outlet fugacity terms before computing the
        # remaining scaling factors.
        iscale.set_scaling_factor(
            m.fs.unit.vapor_phase.properties_out[0].fug_phase_comp[
                "Vap", "CO2"], 1e-5)
        iscale.set_scaling_factor(
            m.fs.unit.vapor_phase.properties_out[0].fug_phase_comp[
                "Vap", "H2O"], 1e-3)
        iscale.calculate_scaling_factors(m.fs.unit)

        return m

    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, model):
        # Ports: inlet, reflux and vapor_outlet each expose the same 4 vars.
        assert hasattr(model.fs.unit, "inlet")
        assert len(model.fs.unit.inlet.vars) == 4
        assert hasattr(model.fs.unit.inlet, "flow_mol")
        assert hasattr(model.fs.unit.inlet, "mole_frac_comp")
        assert hasattr(model.fs.unit.inlet, "temperature")
        assert hasattr(model.fs.unit.inlet, "pressure")

        assert hasattr(model.fs.unit, "reflux")
        assert len(model.fs.unit.reflux.vars) == 4
        assert hasattr(model.fs.unit.reflux, "flow_mol")
        assert hasattr(model.fs.unit.reflux, "mole_frac_comp")
        assert hasattr(model.fs.unit.reflux, "temperature")
        assert hasattr(model.fs.unit.reflux, "pressure")

        assert hasattr(model.fs.unit, "vapor_outlet")
        assert len(model.fs.unit.vapor_outlet.vars) == 4
        assert hasattr(model.fs.unit.vapor_outlet, "flow_mol")
        assert hasattr(model.fs.unit.vapor_outlet, "mole_frac_comp")
        assert hasattr(model.fs.unit.vapor_outlet, "temperature")
        assert hasattr(model.fs.unit.vapor_outlet, "pressure")

        # Unit-level constraints and parameters.
        assert isinstance(model.fs.unit.unit_material_balance, Constraint)
        assert isinstance(model.fs.unit.unit_enthalpy_balance, Constraint)
        assert isinstance(model.fs.unit.unit_temperature_equality, Constraint)
        assert isinstance(model.fs.unit.unit_pressure_balance, Constraint)
        assert isinstance(model.fs.unit.zero_flow_param, Param)

        # Regression counts for model size.
        assert number_variables(model.fs.unit) == 55
        assert number_total_constraints(model.fs.unit) == 49
        assert number_unused_variables(model.fs.unit) == 0

    @pytest.mark.component
    def test_units(self, model):
        assert_units_consistent(model)
        assert_units_equivalent(model.fs.unit.heat_duty[0], units.W)

    @pytest.mark.unit
    def test_dof(self, model):
        assert degrees_of_freedom(model) == 0

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)

    # NOTE(review): the solver mark below is commented out while skipif/component
    # remain — looks unintentional (other solver tests keep all three) — confirm.
    # @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)

        # Check for optimal solution
        assert results.solver.termination_condition == \
            TerminationCondition.optimal
        assert results.solver.status == SolverStatus.ok

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, model):
        # Regression values for the solved state.
        assert (pytest.approx(0.1083, rel=1e-5) ==
                value(model.fs.unit.reflux.flow_mol[0]))
        assert (pytest.approx(0, abs=1e-3) ==
                value(model.fs.unit.reflux.mole_frac_comp[0, 'CO2']))
        assert (pytest.approx(0, abs=1e-3) ==
                value(model.fs.unit.reflux.mole_frac_comp[0, 'MEA']))
        assert (pytest.approx(1, rel=1e-3) ==
                value(model.fs.unit.reflux.mole_frac_comp[0, 'H2O']))
        assert (pytest.approx(184360, rel=1e-5) ==
                value(model.fs.unit.reflux.pressure[0]))
        assert (pytest.approx(303.244, rel=1e-5) ==
                value(model.fs.unit.reflux.temperature[0]))

        assert (pytest.approx(1.0034, rel=1e-5) ==
                value(model.fs.unit.vapor_outlet.flow_mol[0]))
        assert (pytest.approx(0.976758, rel=1e-5) ==
                value(model.fs.unit.vapor_outlet.mole_frac_comp[0, 'CO2']))
        assert (pytest.approx(0.0232423, rel=1e-5) ==
                value(model.fs.unit.vapor_outlet.mole_frac_comp[0, 'H2O']))
        assert (pytest.approx(184360, rel=1e-5) ==
                value(model.fs.unit.vapor_outlet.pressure[0]))
        assert (pytest.approx(303.244, rel=1e-5) ==
                value(model.fs.unit.vapor_outlet.temperature[0]))

        assert (pytest.approx(-6264.72, rel=1e-5) ==
                value(model.fs.unit.heat_duty[0]))

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, model):
        # Total molar balance: inlet = reflux + vapor outlet.
        assert abs(value(model.fs.unit.inlet.flow_mol[0] -
                         model.fs.unit.reflux.flow_mol[0] -
                         model.fs.unit.vapor_outlet.flow_mol[0])) <= 1e-6

        # Component balance for CO2.
        assert (abs(value(model.fs.unit.inlet.flow_mol[0] *
                          model.fs.unit.inlet.mole_frac_comp[0, "CO2"] -
                          model.fs.unit.reflux.flow_mol[0] *
                          model.fs.unit.reflux.mole_frac_comp[0, "CO2"] -
                          model.fs.unit.vapor_outlet.flow_mol[0] *
                          model.fs.unit.vapor_outlet.mole_frac_comp[0, "CO2"]))
                <= 1e-6)

        # Component balance for H2O.
        assert (abs(value(model.fs.unit.inlet.flow_mol[0] *
                          model.fs.unit.inlet.mole_frac_comp[0, "H2O"] -
                          model.fs.unit.reflux.flow_mol[0] *
                          model.fs.unit.reflux.mole_frac_comp[0, "H2O"] -
                          model.fs.unit.vapor_outlet.flow_mol[0] *
                          model.fs.unit.vapor_outlet.mole_frac_comp[0, "H2O"]))
                <= 1e-6)

        # MEA should not leave in the reflux (no MEA in the vapor feed).
        assert (abs(value(model.fs.unit.reflux.flow_mol[0] *
                          model.fs.unit.reflux.mole_frac_comp[0, "MEA"]))
                <= 1e-6)

        # Energy balance: vapor enthalpy in - out - liquid out + duty = 0.
        assert abs(value(
            model.fs.unit.vapor_phase.properties_in[0]._enthalpy_flow_term[
                "Vap"] -
            model.fs.unit.vapor_phase.properties_out[0]._enthalpy_flow_term[
                "Vap"] -
            model.fs.unit.liquid_phase[0]._enthalpy_flow_term["Liq"] +
            model.fs.unit.heat_duty[0])) <= 1e-6
# -----------------------------------------------------------------------------
class TestStripperHeatDuty(object):
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.liquid_properties = GenericParameterBlock(default=aqueous_mea)
m.fs.vapor_properties = GenericParameterBlock(default=wet_co2)
m.fs.unit = SolventCondenser(default={
"liquid_property_package": m.fs.liquid_properties,
"vapor_property_package": m.fs.vapor_properties})
m.fs.unit.inlet.flow_mol[0].fix(1.1117)
m.fs.unit.inlet.temperature[0].fix(339.33)
m.fs.unit.inlet.pressure[0].fix(184360)
m.fs.unit.inlet.mole_frac_comp[0, "CO2"].fix(0.8817)
m.fs.unit.inlet.mole_frac_comp[0, "H2O"].fix(0.1183)
m.fs.unit.heat_duty.fix(-6264)
return m
@pytest.mark.build
@pytest.mark.unit
def test_build(self, model):
assert hasattr(model.fs.unit, "inlet")
assert len(model.fs.unit.inlet.vars) == 4
assert hasattr(model.fs.unit.inlet, "flow_mol")
assert hasattr(model.fs.unit.inlet, "mole_frac_comp")
assert hasattr(model.fs.unit.inlet, "temperature")
assert hasattr(model.fs.unit.inlet, "pressure")
assert hasattr(model.fs.unit, "reflux")
assert len(model.fs.unit.reflux.vars) == 4
assert hasattr(model.fs.unit.reflux, "flow_mol")
assert hasattr(model.fs.unit.reflux, "mole_frac_comp")
assert hasattr(model.fs.unit.reflux, "temperature")
assert hasattr(model.fs.unit.reflux, "pressure")
assert hasattr(model.fs.unit, "vapor_outlet")
assert len(model.fs.unit.vapor_outlet.vars) == 4
assert hasattr(model.fs.unit.vapor_outlet, "flow_mol")
assert hasattr(model.fs.unit.vapor_outlet, "mole_frac_comp")
assert hasattr(model.fs.unit.vapor_outlet, "temperature")
assert hasattr(model.fs.unit.vapor_outlet, "pressure")
assert isinstance(model.fs.unit.unit_material_balance, Constraint)
assert isinstance(model.fs.unit.unit_enthalpy_balance, Constraint)
assert isinstance(model.fs.unit.unit_temperature_equality, Constraint)
assert isinstance(model.fs.unit.unit_pressure_balance, Constraint)
assert isinstance(model.fs.unit.zero_flow_param, Param)
assert number_variables(model.fs.unit) == 55
assert number_total_constraints(model.fs.unit) == 49
assert number_unused_variables(model.fs.unit) == 0
@pytest.mark.component
def test_units(self, model):
assert_units_consistent(model)
assert_units_equivalent(model.fs.unit.heat_duty[0], units.W)
@pytest.mark.unit
def test_dof(self, model):
assert degrees_of_freedom(model) == 0
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, model):
initialization_tester(model)
# @pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(0.108291, rel=1e-5) ==
value(model.fs.unit.reflux.flow_mol[0]))
assert (pytest.approx(0, abs=1e-3) ==
value(model.fs.unit.reflux.mole_frac_comp[0, 'CO2']))
assert (pytest.approx(0, abs=1e-3) ==
value(model.fs.unit.reflux.mole_frac_comp[0, 'MEA']))
assert (pytest.approx(1, rel=1e-3) ==
value(model.fs.unit.reflux.mole_frac_comp[0, 'H2O']))
assert (pytest.approx(184360, rel=1e-5) ==
value(model.fs.unit.reflux.pressure[0]))
assert (pytest.approx(303.250, rel=1e-5) ==
value(model.fs.unit.reflux.temperature[0]))
assert (pytest.approx(1.0034, rel=1e-5) ==
value(model.fs.unit.vapor_outlet.flow_mol[0]))
assert (pytest.approx(0.976758, rel=1e-5) ==
value(model.fs.unit.vapor_outlet.mole_frac_comp[0, 'CO2']))
assert (pytest.approx(0.0232509, rel=1e-5) ==
value(model.fs.unit.vapor_outlet.mole_frac_comp[0, 'H2O']))
assert (pytest.approx(184360, rel=1e-5) ==
value(model.fs.unit.vapor_outlet.pressure[0]))
assert (pytest.approx(303.250, rel=1e-5) ==
value(model.fs.unit.vapor_outlet.temperature[0]))
assert (pytest.approx(-6264, rel=1e-5) ==
value(model.fs.unit.heat_duty[0]))
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, model):
assert abs(value(model.fs.unit.inlet.flow_mol[0] -
model.fs.unit.reflux.flow_mol[0] -
model.fs.unit.vapor_outlet.flow_mol[0])) <= 1e-6
assert (abs(value(model.fs.unit.inlet.flow_mol[0] *
model.fs.unit.inlet.mole_frac_comp[0, "CO2"] -
model.fs.unit.reflux.flow_mol[0] *
model.fs.unit.reflux.mole_frac_comp[0, "CO2"] -
model.fs.unit.vapor_outlet.flow_mol[0] *
model.fs.unit.vapor_outlet.mole_frac_comp[0, "CO2"]))
<= 1e-6)
assert (abs(value(model.fs.unit.inlet.flow_mol[0] *
model.fs.unit.inlet.mole_frac_comp[0, "H2O"] -
model.fs.unit.reflux.flow_mol[0] *
model.fs.unit.reflux.mole_frac_comp[0, "H2O"] -
model.fs.unit.vapor_outlet.flow_mol[0] *
model.fs.unit.vapor_outlet.mole_frac_comp[0, "H2O"]))
<= 1e-6)
assert (abs(value(model.fs.unit.reflux.flow_mol[0] *
model.fs.unit.reflux.mole_frac_comp[0, "MEA"]))
<= 1e-6)
assert abs(value(
model.fs.unit.vapor_phase.properties_in[0]._enthalpy_flow_term[
"Vap"] -
model.fs.unit.vapor_phase.properties_out[0]._enthalpy_flow_term[
"Vap"] -
model.fs.unit.liquid_phase[0]._enthalpy_flow_term["Liq"] +
model.fs.unit.heat_duty[0])) <= 1e-6
@pytest.mark.component
def test_scaling(self, model):
iscale.set_scaling_factor(
model.fs.unit.vapor_phase.properties_out[0].fug_phase_comp[
"Vap", "CO2"], 1e-5)
iscale.set_scaling_factor(
model.fs.unit.vapor_phase.properties_out[0].fug_phase_comp[
"Vap", "H2O"], 1e-3)
iscale.calculate_scaling_factors(model.fs.unit)
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_material_balance[0, "CO2"]) == 1
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_material_balance[0, "H2O"]) == 1
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_material_balance[0, "MEA"]) == 1e8
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_phase_equilibrium[0, "CO2"]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_phase_equilibrium[0, "H2O"]) == 1e-3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_temperature_equality[0]) == 1e-2
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_enthalpy_balance[0]) == 1
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.unit_pressure_balance[0]) == 1e-5
| 44.633588
| 86
| 0.615757
| 2,216
| 17,541
| 4.708484
| 0.100632
| 0.085106
| 0.138106
| 0.061913
| 0.876174
| 0.873682
| 0.867644
| 0.867644
| 0.85921
| 0.85921
| 0
| 0.031471
| 0.242803
| 17,541
| 392
| 87
| 44.747449
| 0.754103
| 0.057522
| 0
| 0.846405
| 0
| 0
| 0.043255
| 0.005506
| 0
| 0
| 0
| 0
| 0.346405
| 1
| 0.055556
| false
| 0
| 0.035948
| 0
| 0.104575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e55417747e373fae25b5439af4a4a1b9c9247172
| 22,970
|
py
|
Python
|
tests/pulses/sequences/test_eval_simplify_sequences.py
|
jerjohste/exopy_pulses
|
844660082331f8972039a085397a92c9a06a46af
|
[
"BSD-3-Clause"
] | 2
|
2016-02-09T20:23:16.000Z
|
2017-09-04T10:18:45.000Z
|
tests/pulses/sequences/test_eval_simplify_sequences.py
|
jerjohste/exopy_pulses
|
844660082331f8972039a085397a92c9a06a46af
|
[
"BSD-3-Clause"
] | 15
|
2015-12-14T21:58:50.000Z
|
2017-10-12T07:04:33.000Z
|
tests/pulses/sequences/test_eval_simplify_sequences.py
|
jerjohste/exopy_pulses
|
844660082331f8972039a085397a92c9a06a46af
|
[
"BSD-3-Clause"
] | 2
|
2016-02-09T20:23:16.000Z
|
2017-09-07T09:41:36.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test evaluating and simplifying base sequences
"""
from collections import OrderedDict
import pytest
from exopy_pulses.pulses.pulse import Pulse
from exopy_pulses.pulses.shapes.square_shape import SquareShape
from exopy_pulses.pulses.sequences.base_sequences\
import RootSequence, BaseSequence
from exopy_pulses.testing.context import DummyContext
@pytest.fixture
def root():
root = RootSequence()
context = DummyContext(sampling=0.5)
root.context = context
return root
def add_children(seq, children):
"""Add a sequence of item to a BaseSequence.
"""
for i, c in enumerate(children):
seq.add_child_item(i, c)
def test_sequence_compilation1(root):
"""Test compiling a flat sequence.
"""
root.external_vars = OrderedDict({'a': 1.5})
root.local_vars = OrderedDict({'b': '2*{a}'})
pulse1 = Pulse(def_1='1.0', def_2='{a}', kind='Analogical',
shape=SquareShape(amplitude='0.5',
_cache={'amplitude': 1.0}))
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10 + {b}')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
print(errors)
pulses = root.items
assert res
assert len(pulses) == 3
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[0].shape._cache['amplitude'] == 0.5
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2].start == 3.5
assert pulses[2].stop == 13.0
assert pulses[2].duration == 9.5
def test_sequence_compilation1bis(root):
"""Compiles two times a sequence while changing a parameter to make
sure the cache is cleaned in between
Also validate that the context cache is cleaned
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='4.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10')
add_children(root, (pulse1, pulse2, pulse3))
res, _, _ = root.evaluate_sequence()
pulses = root.items
context = root.context
assert not context._cache
context._cache = {'a': 1}
assert res
assert len(pulses) == 3
assert pulses[0].stop == 1.5
root.external_vars = OrderedDict({'a': 2.})
res = root.evaluate_sequence()
pulses = root.items
context = root.context
assert not context._cache
assert res
assert len(pulses) == 3
assert pulses[0].stop == 2.
def test_sequence_compilation2(root):
"""Test compiling a flat sequence of fixed duration.
"""
root.external_vars = OrderedDict({'a': 1.5})
root.time_constrained = True
root.sequence_duration = '10.0'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='{sequence_end}')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
print(errors)
pulses = root.items
assert res
assert len(pulses) == 3
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2].start == 3.5
assert pulses[2].stop == 10.0
assert pulses[2].duration == 6.5
def test_sequence_compilation2bis(root):
"""Test compiling a flat sequence of fixed duration but with a pulse
stopping too late.
"""
root.external_vars = OrderedDict({'a': 1.5})
root.time_constrained = True
root.sequence_duration = '10.0'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='{sequence_end} + 1')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert 'root-stop' in errors
def test_sequence_compilation3(root):
"""Test compiling a flat sequence in two passes.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{2_start} - 1.0')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10')
add_children(root, (pulse1, pulse2, pulse3))
res, _, _ = root.evaluate_sequence()
pulses = root.items
assert res
assert len(pulses) == 3
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2].start == 3.5
assert pulses[2].stop == 10.0
assert pulses[2].duration == 6.5
def test_sequence_compilation4(root):
"""Test compiling a flat sequence with circular references.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{2_start} - 1.0')
pulse2 = Pulse(def_1='{1_stop} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(missings) == 2
assert '1_stop' in missings
assert '2_start' in missings
assert len(errors) == 0
def test_sequence_compilation5(root):
"""Test compiling a flat sequence with evaluation errors.
missing global
"""
root.time_constrained = True
root.sequence_duration = '10.0'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='{sequence_end}')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(missings) == 1
assert 'a' in missings
assert len(errors) == 0
def test_sequence_compilation6(root):
"""Test compiling a flat sequence with evaluation errors.
wrong string value
"""
root.external_vars = OrderedDict({'a': 1.5})
root.time_constrained = True
root.sequence_duration = '*10.0*'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} +* 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10.0')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert not missings
assert len(errors) == 2
assert '2_start' in errors
assert 'root_seq_duration' in errors
def test_sequence_compilation6bis(root):
"""Test compiling a flat sequence with evaluation errors.
local vars of root
"""
root.time_constrained = True
root.sequence_duration = '10.0'
root.external_vars = OrderedDict({'a': 1.5})
root.local_vars = OrderedDict({'b': '2*{a}+', 'c': '{dummy}'})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='{sequence_end}')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert 'dummy' in missings
assert 'root_b' in errors
def test_sequence_compilation6ter(root):
"""Test compiling a flat sequence with evaluation errors.
wrong string value
"""
root.external_vars = OrderedDict({'a': 1.5})
root.time_constrained = True
root.sequence_duration = '10.0*{dummy}'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} +* 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{2_stop} + 0.5', def_2='10.0')
add_children(root, (pulse1, pulse2, pulse3))
res, missings, errors = root.evaluate_sequence()
assert not res
assert 'dummy' in missings
assert len(errors) == 1
assert '2_start' in errors
def test_sequence_compilation7(root):
"""Test compiling a nested sequence with a disabled item
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='10.0')
pulse3bis = Pulse(def_1='{3_stop} + 0.5', def_2='10.0', enabled=False)
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence()
add_children(sequence2, (pulse3, pulse3bis))
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
pulses = root.simplify_sequence()
assert res
assert len(pulses) == 5
assert pulses[0] is pulse1
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[1] is pulse2
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2] is pulse3
assert pulses[2].start == 3.5
assert pulses[2].stop == 10.0
assert pulses[2].duration == 6.5
assert pulses[3] is pulse4
assert pulses[3].start == 2.0
assert pulses[3].stop == 2.5
assert pulses[3].duration == 0.5
assert pulses[4] is pulse5
assert pulses[4].start == 3.0
assert pulses[4].stop == 3.5
assert pulses[4].duration == 0.5
def test_sequence_compilation8(root):
"""Test compiling a nested sequence in two passes on the external
sequence.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{7_start} - 1.0')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='10.0')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence()
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
pulses = root.simplify_sequence()
assert res
assert len(pulses) == 5
assert pulses[0] is pulse1
assert pulses[0].start == 1.0
assert pulses[0].stop == 2.0
assert pulses[0].duration == 1.0
assert pulses[1] is pulse2
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2] is pulse3
assert pulses[2].start == 3.5
assert pulses[2].stop == 10.0
assert pulses[2].duration == 6.5
assert pulses[3] is pulse4
assert pulses[3].start == 2.0
assert pulses[3].stop == 2.5
assert pulses[3].duration == 0.5
assert pulses[4] is pulse5
assert pulses[4].start == 3.0
assert pulses[4].stop == 3.5
assert pulses[4].duration == 0.5
def test_sequence_compilation9(root):
"""Test compiling a nested sequence in multi passes.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{7_start} - 1.0')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='{6_start} + 1.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='10.0')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence()
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
pulses = root.simplify_sequence()
assert res
assert len(pulses) == 5
assert pulses[0] is pulse1
assert pulses[0].start == 1.0
assert pulses[0].stop == 2.0
assert pulses[0].duration == 1.0
assert pulses[1] is pulse2
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2] is pulse3
assert pulses[2].start == 3.5
assert pulses[2].stop == 10.0
assert pulses[2].duration == 6.5
assert pulses[3] is pulse4
assert pulses[3].start == 2.0
assert pulses[3].stop == 2.5
assert pulses[3].duration == 0.5
assert pulses[4] is pulse5
assert pulses[4].start == 3.0
assert pulses[4].stop == 3.5
assert pulses[4].duration == 0.5
def test_sequence_compilation10(root):
"""Test compiling a nested sequence with circular reference in the deep
one.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{7_start} - 1.0')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='{6_start} + 1.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='10.0')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='{1_stop}', def_2='0.5',
def_mode='Start/Duration')
sequence2 = BaseSequence()
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(missings) == 2
assert '7_start' in missings
assert '1_stop' in missings
assert not errors
def test_sequence_compilation11(root):
"""Test compiling a nested sequence with circular reference in the deep
one.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{7_start} - 1.0')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='{6_start} + 1.0')
pulse3 = Pulse(def_1='{3_stop} + *0.5', def_2='10.0')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence()
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(errors) == 1
assert '5_start' in errors
def test_sequence_compilation12(root):
"""Test compiling a nested sequence using local vars.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='{b}')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(local_vars=OrderedDict({'b': '2**2'}))
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
print(missings, errors)
pulses = root.simplify_sequence()
assert res
assert len(pulses) == 5
assert pulses[0] is pulse1
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[1] is pulse2
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2] is pulse3
assert pulses[2].start == 3.5
assert pulses[2].stop == 4
assert pulses[2].duration == 0.5
assert pulses[3] is pulse4
assert pulses[3].start == 2.0
assert pulses[3].stop == 2.5
assert pulses[3].duration == 0.5
assert pulses[4] is pulse5
assert pulses[4].start == 3.0
assert pulses[4].stop == 3.5
assert pulses[4].duration == 0.5
def test_sequence_compilation13(root):
"""Test compiling a nested sequence with wrong local vars definitions.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='{b}')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(local_vars=OrderedDict({'b': '2**',
'c': '{dummy}'}))
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(missings) == 2
assert 'b' in missings
assert 'dummy' in missings
assert '4_b' in errors
def test_sequence_compilation14(root):
"""Test the locality of local vars.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{3_stop} + 0.5', def_2='{b}')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='{b}', def_mode='Start/Duration')
sequence2 = BaseSequence(local_vars=OrderedDict({'b': '2**2'}))
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert len(missings) == 1
assert 'b' in missings
assert not errors
# No test of the evaluation errors on the defs as this is handled
# at the Item level and tested in the test of the Pulses.
def test_sequence_compilation15(root):
"""Test compiling a nested sequence with internal fixed length.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} + 0.5',
def_2='{4_start}+{4_duration}-0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
pulses = root.simplify_sequence()
assert res
assert len(pulses) == 5
assert pulses[0] is pulse1
assert pulses[0].start == 1.0
assert pulses[0].stop == 1.5
assert pulses[0].duration == 0.5
assert pulses[1] is pulse2
assert pulses[1].start == 2.5
assert pulses[1].stop == 3.0
assert pulses[1].duration == 0.5
assert pulses[2] is pulse3
assert pulses[2].start == 4
assert pulses[2].stop == 5.5
assert pulses[2].duration == 1.5
assert pulses[3] is pulse4
assert pulses[3].start == 2.0
assert pulses[3].stop == 2.5
assert pulses[3].duration == 0.5
assert pulses[4] is pulse5
assert pulses[4].start == 3.0
assert pulses[4].stop == 3.5
assert pulses[4].duration == 0.5
def test_sequence_compilation16(root):
"""Test compiling a nested sequence with internal fixed length but
incoherent pulse start.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} - 0.5',
def_2='{4_start}+{4_duration}-0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6',
name='test')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert 'test-start' in errors
def test_sequence_compilation17(root):
"""Test compiling a nested sequence with internal fixed length but
incoherent pulse stop.
"""
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} + 0.5',
def_2='{4_start}+{4_duration}+0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6',
name='test')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
assert not res
assert not missings
assert 'test-stop' in errors
def test_sequence_compilation18(root):
"""Test compiling a nested fixed duration sequence.
"""
root.external_vars = OrderedDict({'a': 1.5})
root.time_constrained = True
root.sequence_duration = '100'
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} + 0.5',
def_2='{4_start}+{4_duration}-0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6',
name='test')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
res, missings, errors = root.evaluate_sequence()
print(errors)
assert res
root.sequence_duration = '1'
res, missings, errors = root.evaluate_sequence()
assert not res
assert 'root-stop' in errors
| 32.30661
| 79
| 0.629038
| 3,422
| 22,970
| 4.08533
| 0.054939
| 0.111588
| 0.058584
| 0.018884
| 0.87382
| 0.856652
| 0.834621
| 0.822175
| 0.821173
| 0.795708
| 0
| 0.074521
| 0.210753
| 22,970
| 710
| 80
| 32.352113
| 0.696619
| 0.092686
| 0
| 0.827935
| 0
| 0
| 0.091139
| 0.00505
| 0
| 0
| 0
| 0
| 0.404858
| 1
| 0.048583
| false
| 0
| 0.012146
| 0
| 0.062753
| 0.008097
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e573b1fd97f8dc5f944ce7eb68a90fbd0627d6cd
| 143
|
py
|
Python
|
lib/__init__.py
|
antonOO/vmware-openapi-generator-1
|
f06cf93a683969e6a6fb9560f2e0a029bb769e89
|
[
"MIT"
] | 19
|
2018-12-07T18:54:25.000Z
|
2021-12-06T23:10:41.000Z
|
lib/__init__.py
|
antonOO/vmware-openapi-generator-1
|
f06cf93a683969e6a6fb9560f2e0a029bb769e89
|
[
"MIT"
] | 27
|
2019-01-07T08:38:36.000Z
|
2021-04-28T15:52:51.000Z
|
lib/__init__.py
|
antonOO/vmware-openapi-generator-1
|
f06cf93a683969e6a6fb9560f2e0a029bb769e89
|
[
"MIT"
] | 19
|
2018-12-07T06:47:53.000Z
|
2021-12-13T15:59:28.000Z
|
from .api_endpoint.api_metadata_processor import ApiMetadataProcessor
from .rest_endpoint.rest_metadata_processor import RestMetadataProcessor
| 47.666667
| 72
| 0.916084
| 16
| 143
| 7.8125
| 0.5625
| 0.272
| 0.368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055944
| 143
| 2
| 73
| 71.5
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e5931975756373502bcbc7411cdfad661bac392a
| 17,561
|
py
|
Python
|
install/app_store/tk-multi-workfiles/v0.7.4/python/tk_multi_workfiles/ui/resources_rc.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 4
|
2019-01-11T03:41:28.000Z
|
2019-09-12T06:57:17.000Z
|
bundle_cache/app_store/tk-multi-workfiles/v0.7.4/python/tk_multi_workfiles/ui/resources_rc.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | null | null | null |
bundle_cache/app_store/tk-multi-workfiles/v0.7.4/python/tk_multi_workfiles/ui/resources_rc.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 2
|
2019-01-10T05:00:18.000Z
|
2020-02-15T16:32:56.000Z
|
# -*- coding: utf-8 -*-
# Resource object code
#
# by: The Resource Compiler for PySide (Qt v4.7.4)
#
# WARNING! All changes made in this file will be lost!
from tank.platform.qt import QtCore
qt_resource_data = "\x00\x00\x0b\x02\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00x\x00\x00\x00P\x08\x06\x00\x00\x00\xd2\x9b\xb1\x89\x00\x00\x03\xf0iCCPICC Profile\x00\x00(\x91\x8dU\xddo\xdbT\x14?\x89o\x5c\xa4\x16?\xa0\xb1\x8e\x0e\x15\x8b\xafUS[\xb9\x1b\x1a\xad\xc6\x06I\x93\xa5\xe9B\x1a\xb9\xcd\xd8*\xa4\xc9un\x1aS\xd76\xb6\xd3mU\x9f\xf6\x02o\x0c\xf8\x03\x80\xb2\x07\x1e\x90xB\x1a\x0c\xc4\xf6\xb2\xed\x01\xb4ISA\x15\xd5$\xa4=t\xda@h\x93\xf6\x82\xaap\xae\xafS\xbb]\xc6\xb8\x91\xaf\x7f9\xe7w>\xef\xd15@\xc7W\x9a\xe3\x98I\x19`\xde\xf2]5\x9f\x91\x8f\x9f\x98\x96;V!\x09\xcfA'\xf4@\xa7\xa6{N\xba\x5c.\x02.\xc6\x85G\xd6\xc3_!\xc1\xde7\x07\xda\xeb\xffsuV\xa9\xa7\x03$\x9eBlW=}\x1e\xf1i\x80\x94\xa9;\xae\x0f \xdeF\xf9\xf0)\xdfA\xdc\xf1<\xe2\x1d.&\x88Xax\x96\xe3,\xc33\x1c\x1f\x0f8S\xea(b\x96\x8b\xa4\xd7\xb5*\xe2%\xc4\xfd31\xf9l\x0c\xf3\x1c\x82\xb5#O-\xea\x1a\xba\xcczQv\xed\x9aa\xd2X\xbaOP\xff\xcf5o6Z\xf1z\xf1\xe9\xf2\xe6&\x8f\xe2\xbb\x8f\xd5^w\xc7\xd4\x10\x7f\xaek\xb9I\xc4/#\xbe\xe6\xf8\x19&\x7f\x15\xf1\xbd\xc6\x5c%\x8dx/@\xf2\x99\x9a{\xa4\xc2\xf9\xc97\x16\xebS\xef \xde\x89\xb8j\xf8\x85\xa9P\xbeh\xcd\x94&\xb8mry\xce>\xaa\x86\x9ck\xba7\x8a=\x83\x17\x11\xdf\xaa\xd3B\x91\xe7#@\x95fs\xac_\x88{\xeb\x8d\xb1\xd0\xbf0\xee-L\xe6Z~\x16\xeb\xa3%\xeeGp\xdf\xd3\xc6\xcb\x88{\x10\x7f\xe8\xda\xea\x04\xcfYX\xa6f^\xe5\xfe\x85+\x8e_\x0es\x10\xd6-\xb3T\xe4>\x89D\xbd\xa0\xc6@\xee\xd7\xa7\xc6\xb8-9\xe0\xe3!r[2]3\x8e\x14B\xfe\x92c\x06\xb3\x88\xb9\x91\xf3nC\xad\x84\x9c\x1b\x9a\x9b\xcbs?\xe4>\xb5*\xa1\xcf\xd4\xae\xaa\x96e\xbd\x1dD|\x18\x8e%4\xa0`\xc3\x0c\xee:X\xb0\x012\xa8\x90\x87\x0c\xbe\x1dpQS\x03\x03L\x94P\xd4R\x94\x18\x89\xa7a\x0ee\xedy\xe5\x80\xc3q\xc4\x98\x0d\xac\xd7\x995Fi\xcf\xe1\x11\xee\x84\x1c\x9bt\x13\x85\xec\xc7\xe7 
)\x92Cd\x98\x8c\x80L\xde$o\x91\xc3$\x8b\xd2\x11rp\xd3\xb6\x1c\x8b\xcfb\xdd\xd9\xf4\xf3>4\xd0+\xe3\x1d\x83\xcc\xb9\x9eF_\x14\xef\xac{\xd2\xd0\xaf\x7f\xf4\xf7\x16k\xfb\x91\x9ci+\x9fx\x07\xc0\xc3\x0e\xb4\x98\x03\xf1\xfa\xaf.\xfd\xb0+\xf2\xb1B.\xbc{\xb3\xeb\xea\x12L<\xa9\xbf\xa9\xdb\xa9\xf5\xd4\x0a\xee\xab\xa9\xb5\x88\x91\xfa=\xb5\x86\xbfUHcnf\x90\xd1<>F\x90\x87\x17\xcb ^\xc3e||\xd0p\xff\x03yv\x8c\xb7%b\xcd:\xd7\x13iX'\xe8\x07\xa5\x87%8\xdb\x1fI\x95\xdf\x94?\x95\x15\xe5\x0b\xe5\xbcrw[\x97\xdbvI\xf8T\xf8V\xf8Q\xf8N\xf8^\xf8\x19d\xe1\x92pY\xf8I\xb8\x22|#\x5c\x8c\x9d\xd5\xe3\xe7c\xf3\xec\x83z[\xd52M\xbb^S0\xa5\x8c\xb4[zI\xcaJ/H\xafH\xc5\xc8\x9f\xd4-\x0dIc\xd2\x1e\xd4\xec\xde<\xb7x\xbcx-\x06\x9c\xc0\xbd\xd5\xd5\xf6\xb18\xaf\x82Z\x03N\x05\x15xA\x87-8\xb3m\xfeCk\xd2K\x86Ha\xdb\xd4\x0e\xb3Yn1\xc4\x9c\x98\x15\xd3 \x8b{\xc5\x11qH\x1cg\xb8\x95\x9f\xb8\x07u#\xb8\xe7\xb6L\x9d\xfe\x98\x0ah\x8c\x15\xafs \x98:6\xab\xccz!\xd0y@}z\xdag\x17\xed\xa8\xed\x9cq\x8d\xd9\xba/\xefS\x94\xd7\xe54~\xaa\xa8\x5c\xb0\xf4\xc1~Y3M9Py\xb2K=\xea.\xd0\xea \xb0\xef 
\xbf\xa2\x1f\xa8\xc1\xf7-\xb1\xf3z$\xf3\xdf\x068\xf4\x17\xdeY7\x22\xd9t\x03\xe0k\x0f\xa0\xfb\xb5H\xd6\x87w\xe2\xb3\x9f\x01\x5c8\xa07\xdc\x85\xf0\xceO$~\x01\xf0j\xfb\xf7\xf1\x7f]\x19\xbc\x9bn5\x9b\x0f\xf0\xbe\xea\xf8\x04`\xe3\xe3f\xf3\x9f\xe5fs\xe3K\xf4\xbf\x06p\xc9\xfc\x17Y\x00qx\xc4(\xc2@\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x06\xb8IDATx\x9c\xed\x9dMr\xf4(\x0c\x86\xc5\x14\xbd\xe9\xf3\xcc]r\xb7\x5c#\xd7\xc8e\xb2\xf1\x22\xb8\x86\xd94\x8e,#\x10\xbf\x96\x13?U)\xfaKc\xac\xe65H\x02:\x9fy\x7f\x7f\xff\xcf{o\xe0\xe6\xd7a\x8c\xf1\xd6{o\xde\xde\xde\xfe\x05\x00x>\x9f^r\xe1\xb2,\x87\x07\xc29\xd7\xdb\xbe&\xb4\xd9\x03\x00\x1e\x00LA\xd9\xdc\xf6\xc7\xc7\xc7\xa7\xc5\xb5b\xc2q(\xec\xc0a6=\x1e\x0f\xef\x9c3\xb4,l\xc6\x902\x87Dh\xcf\xbc\xde\xae\xdb\x09\x9cC\x9b\xa8\xd4\x9eNB\xc4\xee\x13m\x03\xdf\x07\xd9#\xbd\x1f\x15\x9c+%m\x1c\x08}\x93\x15x\xb4\xa8\xcf\xe7\xd3/\xcbbB\xd9bO\x10\x82\x0aB\x85H]\x9b\xb9\xb7I\x95g\xc2\xf5KT\xe0\x9c\xa8\xd4W\x97L\xed\x14z-\x16\x9c\xa9/\xeeP\x8dB\x14\x90\xf5\xd5\x92\xcfc\x01\xcaGi\x10%'\x06\xad/\xa9C\xcb\x1a\xfb~;\xce9x<\x1e\x00\xf03;qu-@\xfd\x88\x8c\x89\xd1\x8b\x8b\x8a\xda3B\xde|q\x10\x14\x0bK\x89\xc4\x03\x06\xe0%\xf0\x08\x81J9KP4\x02z\x8a\x92\x03\xb7-\xb1q{-\x89\x03\xf0\xeb\xa2(\xba75\xa2\x0e\x88\x90\xa5U\xabF\x9f\xb0\xac~\xc0\xb9\xfe\x08#y\xa8\xc04B~>\x9f\xfe\xeb\xeb\xabI\x94\xd2\x08Y\xf0\x10\x14\x8b\xd1\x8b\x19\xb3\xd6A\xe0X\xe0T;\x85/\xcbb\x9cs!\xf2\x85eY\x00\xe0\xe7\xe9\x8a\xf1\xcb#\xe4h\x9e\xde\x92\xbf\xe7\xea\x1d\x04\xc6bb\xa1c\x912'<}2/*F\x0e\x91\x8f&}Q\xf4\xf9k\x84\xa6%;E\xa7\xd2\x16\xe6\x06\x12;\xaeLN\xd0\xa2\xfc\x14\xe0(\xf4\x88\x81\xd0\xe4\x83\x95\x8a*\x89\x86\x87D\xc84?\x15\xd4\xdf\x09)\x9d\xbas#\x1b\xeb\xa2j-\xba\xf3\xa2~\x09T\xfcl}d\xd7\xael\x19}\xa5\xd7\xe0\xfe)Z\xaa$7M6\xde3m\x89LY\xe1e\xd3B\x01)\xa5\xef\xc5\xec\x13\xde\xfaH\xcf\xc0)\xd8\xe2\xbd\x87u]\xc1Z\xcb\xf6\xcbA`\xe9\x87\x90\xee\xb0H\xaeI\xd4\x1b\x9e\xaa\x08l)\xaa\x9f{\xf0[\x84\xfe\xfe\xfe\x86u]\xc1\xfb\xf8$c\xad\xf5\xeb\xba\xee\xae\x09\x02oW\
xc4VM$F\x5c4RN.\xe4\xbf\x1e\xd2\xaeiK@\xda\xa6s\xee\xf0\xc0\x07\x11\xb9\x12\x136\x1b\xae \xc6\x08v+I\xd2\xf5^I\xc3\xb9\x07\x9fk\xa7d\xc6\x08#\x96+\x01\x00\xfe\x11\xb7\xa6\x03_X\xb2\xbcF\x06\x1d!\xb9k\xaa\x07BpY\xb1\xf2\xe5O\xbds.\xf8S\x11\xb1\x11K\x99\xb1\x16=b\x87\xa5\xe6\xde;$\xa34\xb5,\xda\xba\xe2T\xe3O9R\xf5\xa2\x02w\x8e\x90\xa5\xd7I\x85\x8e\x8d\xd2C0\xe6\xf6\xc7g\x86\xba\xa0\x1a\x7fj\xadll\xd5\x08\x8d\xebs':\xc4\x112\xe9\xc8\x14\xb9\xc8\xb8y\x14\x8f\xca\xd3k\x03\xa7\x94=\xa9\xc0(F\x89\xd0\x98\xa4\x0fN\x05\x08\xe1\x07NH]0\xd8\x97\x06R\xfe\x8e{Ox/i~Z\xedOK\x22d\x09\xa7\xee\x073d}un\xa4\xd6\xb8\x93\x96\x089\xac\xd3\x87\x7f\xb7\x8a\x02p\x8c\x90\xa5\xd7\x85\x1d\xbb\x80\xa6(:\xb5\x85\x88\xcb\xe2\x11Gg\x1e\xee\xbd\x12c\xf1\xcc\x11F\xaa\xb4\x94P\xf2\x90,\xcb\xb2\xfdPZG\xf0\x903H\xe1\x17\x92\x9c\x94N\xb1\xa5\xfeR\xba\xa0/\xf1\xa7Rj\xfdi &${/\xf4Z\xcd\x19$\x8cd\xc4\xd5D\xfaR\x7f\xda\x1a\xc5\xa6(i\xbbD\xd4\xdd=\x12\xef\x0d=\x83\x14\xa8\x8d|9A[\x85\xe6\xf2\xd3\x1a\x9f\xc8\xd5\x93>\x08\xb5\xa2b\xb0\xc0\xb5iK\x11\xad\xa9\x8cT@I=l\xcb\xec\xfc\x94\xa3\x87\xa8\x98\xd1Qtr\xf1~\xc4\x19$J\x89/\x05\x90\xa7+->\x14\xd3[PJ\x8d\xc0\xa2\xa9\xbbf%i\xc4b>*%\xcd\x1e\xc8-\xe8\xd7L\xdd\xa3E\xc5H|0\x17|u\xcbO{n1\xe2YaY\x96\xa8\x00=\xa7\xd7\x99\xfe\xb4\x86\x94\xc0\xa2Q\x8a\xb6\xd8<\x97\xd2D\xae\xdb\xb5\xdd\xe3\x18)\xca\x95\x0d\x00\xb0\x8b\xf8\x98\x96\xc0\xe9\x0c\x7fZ\x03M\x938\xf0(\xddM\xbb-\xa3\xaf\xd5\x9f\xa6\xa8\xf1\x913\xf3\xd3Y\xc4\xa2\xe8\x035\xfekF\xe0$\xb1k\xb4\xd0\x1aE\x05\xf8\xd1\xac\xea\xfb\xc1\x98\x11g\x90R\xf5\x1d9lV\x93\x93\xb6F\xc8\x1aE\xb5\xd6z\xac[H\xf7\xb63Y\xa3\xd3\x96\x96\xb6G,\xe6\x97\xa0QP\x80\xbd\xa8\xce9\xfeL\x16\xc7\xc8\xb4%\xd3V\xf4\xf7#R\x16\x0e\xad\xa2\x02\x1c\x03\xca\x14\x97\xfb\xfa\xe8_\xa1W\xdf\x0c\xdb.\xcc\x1d.\xf3\xaf<\xa6\xf7!3LI\xdb\x00\xe9m\xb7\x19\xc4\x0e/\xb4\xb2;6\xcbM\xa3-\xfe3w\xb8l3\xa4\xd3!3\xfc\xfe\x15r\xd5\xd1\xb3X\xf2\xd0\x1dwQ\xc9\x97\x9f\x80\x1c\xd6\xce\x1a\xd4 
\xf4\x15\xa2\xdf\xd9n\xa9\xe8\xd0\x1d\x85.\x0bJ\xae\x99!4\xe6\xea\xa2\x96\xba\x99\xc0\xb6,\x9b\xaaT\xba\x00\xd1\xb2\xe4\xd7s\xb7F\xab\xa8\xb5b\x05\xaa\x16m$\x95b>\xda\xa13\xbe4?\xad\xa15\xd59ST\xe9\x08\x9d\x95\xb7c\xaa\xbe\x1f\x1c\xbe\xae\xd83\x1f\xbd\x9a?\xd5\x9e\xe2\x1d\x96*\xb9\xc0)\xf6AF\x9e=\xd2\xecO5\x8b\xca.\x0e\xd1_\xd0}TI\xe3#O\xe7\xdf\xa2\xf2Hl\xdb\xfeVeX\xc0\xe7\x0e\x9bIn(\xcdOS\xfc5Aq\xe0%\xe9\x9fR\xfb\xc4>\xb8G\x84\xccq\x15Q[\xa3`\xca\xba\xae&\xd77\xad\x0f\xdc&\xf0\xacCf\x01\xad\xa2JE\x1c\x19\x11w_\xaa\x8c\xbe1`\xc7\xe6lQ\x01\xe4\xa7*g3\xca5\xb0\x02\xff\x06\x7f\x0ap\xfd \xa9\x95\xac\x0f\xbe\x9a?\xd5\xcc\x19\x0f[\x97\xfd`m\xa2j\x1a\xb5g\xdbb\x01\xae\x99\x9fRzt\xa4$\xc0\x1a\x91\xca\x8c\xc4\x02\x5c\xc7\x9fb\xc8\xb2\xa9\xf8;E\x12j\x02-M\xa2b\x92\xbd\xa2YTk\xad(\x8f\x1cm\x8bvb\x7f/\xfa\x0c;XRk\xe1\xb3\xb9\x8a\xa8\x98\xf0\x9fr\x9cm\xc7\x86\xb6N\xd4fO)\xa7\xff\x11\x16\x8d\x1d\xa8\xd1\xa6ZN\x11X[\x07j\xb3\xa7'\xd3\x04\x9e\xb5\xa0/\xf5\xcf\xbfYT\xccP\x81i\xd4[J\xcf`\xea\xaf\x08J\xe9*0\xd7\x89gD\xbd\x7fUPJ\xb3\xc0\x9a:R\x93-Z\xa8\x12XSGj\xb2E#\xd2c\xb3\xa3\xed(B\x9b=\x9a\xa0}\xc3\x0a<l\x03\x9aD\xc9W[\xbc\xd7H\xaa\x7fv\x02\x1f\xd4\xef|\x06)p\x8b\xdaFI\xdfl\xa7*s\xa9\xcc\x8cH\xf8\x16\x95\xa7\xb6o\x0e\x87\xeefs\x8b\xca\xd3e\x8f\xbb\x83\x1dE\xdc\x82\xa6\xe9\xdd?S\x04\xbeE\xe59\xe5\x0b\xe0=\xb8E\xe5\x99\xd97\xdb\x99\xacT\xa5;\xeam\xe7\xac\xfe\x11\x9f\xc9\x8aq\x8b\xca\xa3\xa5o\xac1\xc6\x7f~~~\x9em\xc8M\x7f\x8c1\xfe\x7fY1-\x9b\x81\xda\xfb\x17\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x09\x85\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x01\xd5iTXtXML:com.adobe.xmp\x00\x00\x00\x00\x00<x:xmpmeta xmlns:x=\x22adobe:ns:meta/\x22 x:xmptk=\x22XMP Core 5.1.2\x22>\x0a <rdf:RDF xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22>\x0a <rdf:Description rdf:about=\x22\x22\x0a xmlns:tiff=\x22http://ns.adobe.com/tiff/1.0/\x22>\x0a 
<tiff:Compression>5</tiff:Compression>\x0a <tiff:PhotometricInterpretation>2</tiff:PhotometricInterpretation>\x0a <tiff:Orientation>1</tiff:Orientation>\x0a </rdf:Description>\x0a </rdf:RDF>\x0a</x:xmpmeta>\x0am\x05\x0b\x9e\x00\x00\x07VIDATx\x01\xed[MK$W\x14\xedV[\x8d(d\x97\x84\xd9\xe9N\xdcd7C@\x5c\xcd\x22\x90\xec$\x7fAaV\xd9\x09\x92\x7f\x11\xb2\xcb\xcaY\x09&\x81\x90\xb5\xf8\x81\xab\x08\x19G\xc1\xcd\xb8\x0a$\xd9\x1b\xbf5\xf7t\xfa4\xc7;\xaf\xaa\xde\xad\xee\x16\x06}P\xdeW\xf7\xdc\xcf\xf3^}\xb4\xb6\xcd\xbb\xbb\xbb\xc6c\x1eC\x8f\xb9y\xf4\xfeD\xc0\xd3\x0ex\xe4\x0c\x8c\xd4\xed\x7foo\xefY\xb3\xd9lU\xf9\x8f\x8f\x8fW\x994\xd4\x06\xf3\x9b\x9b\x9b\x7fgff\xfe1G\x5c\xa2\xb7\x95\x01z0\x08\x13\xb0\xb3\xb33m\xf9\xbe\xb7\xe6_\xdc\xde\xde\xb6L6x\xa0\x0e\xce)\xaf\xae\xae\xda\xe5\xf1\x5c%\xed///\xbb~\xd6|\xe3\xe2\xe2\xe2W\xc3\xbe9>>~i\xe7S\xb3\xb3\xb3\xeb\xed \x03\xf8\x11\x22`sss\xc4\x1e\x9b?LNN\xbe<;;k\x17\xcd\x9a\xf08Es\xa9\xe1\x9b\xa6\x8d\xea\xd5\xd7b\xb5\xb7\x8d\x11\xfc\xb1\xed\x88\x1f\x8f\x8e\x8e.\x8d\x84_\xe8\xd7O\x19z\x0aX\xd3\x9fZQ/NOO\xb1M\xef\x1d\xa6o\xf0\xf0\x98\x9e\xc3F\xcfuN\xccd\xfb\xe5\xc4\x88\xb86b>\x1a\x19\x19Y\xb3\xdd\xf0u?\x1bg\xac\x10\x01\xe64fG\x13\x85b\xc5q`\xee\x0fb\x94up\x14x}}\xdd\xe8\x5cB\x93F\xc4\x9a\xed\x84\xbe\x93\x10\x22`tt\xf4\x8e\xcd\xb3)6\xe9%q\xda{\x1c\xe7\xb4\xf1\x18\xf4\x1c\xc0p\x8f09\x85\x9d\xd0o\x12B\x04\x9c\x9f\x9f\xb7\x8bN\x15\xccf(#6\xf4\xa1\x84/\x07t8\x07\x096\x9f\x1a\x1e\x1e^{\xf3\xe6M\xdfvB\x88\x00\x14\x85bX(\xa5o\x96\xe7\xc4!\xa9SI\x5cu\x98C\x8f\x81\xfb\x03\xcf!q9\x98\x9c\x1a\x1b\x1b[;<<\xfc\xaam\xd4\xe3\x8f\xda\x04\xa0 
=\xd8\x8cJ\xc51W\x0cs\xc5\x15\x83\x1e\x03\x04\xa8\x1d\xe6\xb2\x13^\xdbN\xe8\x99\x840\x01\x5c\x15\xdf\x906\xa3s\xdfX\x0e\x06\x1f\x0e\xda+\x11\xdc\x09\xadV\xabg\x12\xc2\x04\xa4\x0aR\x9d6\xacE\xa7l\xa8\xa3\xa4/\xce1\xb8\x034\x0em\xe4\xc6\xd8\x13\x09\xb5\x08\xd0\x82P,\x8bb#)YdC\xbd\xc6\xb4y\xf7.\x88\x17\xa4\x94\x0dr\x80\x04\x1bx:\xbc\xde\xdf\xdf\xafu9\x84\x08\xb0W\xd4\xee5YT\x14\x09\xa1\xa4\x9d'\xa5H\xdfY\xf5\xcf\xd0\x995\xff\xbb=yN\xec\xa6\xd7\xb0G\xf0{\x07\xf4CCC\x0d\x93S\x86\xfftpp\xb0\x087\xf8\xe6\x8e\xd0\xab0\x82\xb21\x9f\x00z\xac\x16\x0f4\x88\xb9\x0eo\x93\xc2\xb0\xaa\xb6\xa2\x9fomm}9??\xff\xdb\xf6\xf6\xf6\x17v\xfe\xdcl[ \xc7\x1e\x83]7\xce\xb10f3l/N\xa7\xeb\xeb\xebC\x8b\x8b\x8b7]\xa3\x8aI\x13E\xe5\x8e\x8d\x8d\x8di\xb3\xfd\xc3X\x9f\xa4\x0f\x1b\xf6\x128V\x07\xc3c</\xc2\xac\x19\x10\xfd\xb7\xe1\xaf\x8c\x90\x9f\x17\x16\x16\xaea;\x88\x11\xde\x01\x5cY\xae\xa6\x16E\x1d\x1b\xa4m\x99M\x0a\xc3]\xdeH\xf8\xc4\xb0u\xbb\xd3\xbf\xdd\xdd\xdd}gd\xe2sA\xdb\x9c\x12\x04s\xde\xd9\x0d\xe7f\xff\xed\xdc\xdc\x1c\xc8\xcb\x1aa\x02\xd0$\x1a\x83D\x01\xbei\xcd\x9akC\xc2\xd8\x0cb\xe0s\x00\xce\x8d\x889\x1cj\x93\x9a\xa3\x16\xfb\xb0vc\xb5}g\xee\x83!\x00\xaf\xc2`\x1a\x05\x90\x08\xccuxB\xea\xee\x026\x89\xdd@2\xa8C>\xce)A\x80\x8d3\xcb\x9f\x7fM\x9bC\xed\x1d\x80l\xc85\xc8]\xc0\x1clR%0\x0e\x92\x0e\xb2yc$V%k\x11\x80\x84\x9a\x14\x85\xe9 
\xc6\x82{\xdd\x05\x1a\x9b\xf3T\x0e\xe8\xa2#L\x00\x9b\xf1\xc9|AZ\x88\xc7H\x0cmR81\xc8\x14\x9e\x22\x1dv\xb8L##L\x00\x92\x80\x04\x0c_\x98O\xac\xb8\xc7\xbc\x7f\x1d\x5cc\xa4\xfcst\xb5\x08@cl\xce'\xa1\xde\xaf\xb2\x16KLW\xd1\xfb\xd1\x86\xf1\x8bp\xc6\x00\x8e#:B\x04\xe0\x8d\xabs\xb7\xed\xe6I\x15\xd6\x05m\xe2q\xc50W\xdcc9\xb8\xda0V*N\x91.D\x00\x82\xe8=\xc0\xaf\x92&a1\x5c!b\xaa\xf7\xfe),\xc7\x1f6\xa8\x0b\xfe\x0f\xf2\x14\xe0=\xa0\xac)-\xdc7F?J\xc5\xa9S\xa9\xb8\xc6\xf56\xb0\x8b\x8e\xbe\xec\x80\xb2\xa2\x80y\x1c\x85\xfa\xf7\x07\x16\x9e\xc2\x22\xfe\xb8L##L\x00\x0a\xf4;\x00\x09s\x0a\xd7\xc2x)\xd1\x97DA*F\x1f\xc4/\xb3\x01\x8e#:B\x04\xf0U\x98\x89\xaa\x8aJ5\xc7&<\x96*\xdc\xc7\xf76\x1e\xe7\xb9\xb7+;\x0f\x11\x80@\x5c\x9d\xb2d\xc4\xb4Y-Bq\xeaUG?H\x1d)\x1b\xe2\xdc\x95\xde\x87x\x91\x0c\x13\x80\x22\x98\x0cASE\xf9\x22R6Z\x10q\xd5qN\x8c\xa4PO\xa98\xe6\xfe1M\xbb\x22Y\x8b\x00$\xd2\xc4>\xb8b9\x85+aU\xbeE8\x17e\xa0\xaf\xc2\x9d_=\xf9~\xef\x91Q\xd40\x9c|\xf1\x1aH\xb1\x22}Ql\xfa\x92\x04\xf5\xaf\x9a\xd7\xde\x01\x08\xcc\xc4\xba\x82L\x08\xcc?\xea\xbc\x9d\xfa\x97a\x8c\xa9\xd2\xfb\xa2y\xea\xd4\xaej\x1e&@Y.+\x1a\x18l\xabl\xb4@6\x00\x1f\xf5\x83>\x87L\xfak\xcc\xaay\x98\x00$\xe1\x91ST\x8e\x0d\x1b\xd6\xa6Q8\x1b\xc7\xbc\x8aLo\x8f\xf3\x9cQ\x8b\x00\xee\x82\xaa\xa2\xd0P\x8e\x8d\x16\xcaU$)\xde\xdf\xe3\xf4\xe5\xa2\xf0<W\x86\x09@AXU\x0c\xaePQQ\x11\x1b6\x5c\xb4\x0b4G;y\xe7\x07\xf5\xd4=\xc8\xab0\x922\xb1_!6\xad\x0d\xe5\xd8\xb0\x81:\xfe\xf4\xd1\x18\xb9\xf3\xd0\x0e\xc03\x96\xab\xcf\x04 \x22\xe7:\xcf\xb1Q\xd2\x18\x1f\xb2*\x07\x17\x83v\xea[5\x0f\x11\x80`\x5cM&E\xd1\xd4i2\xc5smz\xf1G\x0d\x0fB\x00\x92 
\x99\x0e&\xf6M\xe7\xda\xf4\xea\x8f<\x88\x81#:\xfa\xb2\x03rW\xb8l\xa7\x94a\xbc4\xcal\xd8\xf8\xc0_\x85\xf1+'\xbf\x03\x98\x9cRw\x02u^\x96\xd9\x94a\x88\xa38\xc8\xa1\xae=\x09\xfe\x08\xef\x00$\xc7\xc1\xe1\x8b\xe1j\x11\x87\xac\xb2I\xe1U\xfe\x8a3\x07\xe3x\xac\xec\xbc\x16\x01\xdc\x01\xa9f\x99\x8c\xc5\xe4\xda\xd0\x0f\xd2\xfbr\x95\xcbl\x88\xa5l\x89\xa5d\x98\x004\x9fz\x14\xb2Q\x95\x9a0\xd5\x14\x8bMa\xea\x8by\x95\x0d\xf0:#L\x00\x12\xf1(j\x96\x85\xf8\xa2\xa9\xa7T\x9c:\x95\x8a\x93,\xc51\xa7\x0d\xf5\xf8Krd\x84\x09\xe0\xf6\xf7IX\x88\x92\xe2\x8bN\xd90N\x0a\xcb\xf1\xa7\x0d\xfcqDG\x88\x00\xfb\x96x\x13_Vb\x22_t*y\x95\x8d\xe2u\xfc\xe1\xc3\x18\x90\xb6\x03\xee\xff\x221\x15Tt!\x02\x8c\xed\x0b\xdb\x01]\x9a\xc9>\xe3\xb1\x10\xe8\xf5 \x0e\x99\xb2!\x9e\xc2rs`g\x9a\xff\x9d\xfd\x7fA\xe8\xcf\xc3\xff\x7f\xacc\x05\x15rbb\xe2/\xcb\xb1g\xdf\xc3i[\xa2`=R\xee\x8ac\x9e\x1a\xb4\xf1\x18\xed\x89\xf3\x5c\xed\xa8\xc3\xce4\x12\xf6NNN\xb2\xbf\x1e\x838\xa1o\x89\xc1aeee\xdaV\x05\xff2\xf3\xdc\x92\x8f\xeaJs\xb5\xbcN\xcf\xdbI\x13;Dm\xf0\x94\xa9\x8a\xa5q\xec\xe5\xec\xd2\xec\xf7l\xfb\xbfZ^^~\x07,w\x84\x09`\xe0\xd5\xd5\xd5g\xf6\xbd\xbd\xf7\xfeiJ\xff\x01\x8a\xb6\x94\xf8bc\xce(\x8b\x01\xff\x04~\xb5\xb4\xb4\xf4gNloS\x9b\x00\x1f\xe8C=\x0f\xdd\x03>\xd4&\xcb\xea~\x22\xa0\x8c\x9d\xc7\x80=\xfa\x1d\xf0\x1f\x12\xa1h\xf9\x0ei2\x90\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03\xb4\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x0c\x00\x00\x00\x0e\x08\x06\x00\x00\x00\x1b\xbd\xfd\xec\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x01\xd5iTXtXML:com.adobe.xmp\x00\x00\x00\x00\x00<x:xmpmeta xmlns:x=\x22adobe:ns:meta/\x22 x:xmptk=\x22XMP Core 5.1.2\x22>\x0a <rdf:RDF xmlns:rdf=\x22http://www.w3.org/1999/02/22-rdf-syntax-ns#\x22>\x0a <rdf:Description rdf:about=\x22\x22\x0a xmlns:tiff=\x22http://ns.adobe.com/tiff/1.0/\x22>\x0a <tiff:Compression>5</tiff:Compression>\x0a <tiff:PhotometricInterpretation>2</tiff:PhotometricInterpretation>\x0a <tiff:Orientation>1</tiff:Orientation>\x0a </rdf:Description>\x0a 
</rdf:RDF>\x0a</x:xmpmeta>\x0am\x05\x0b\x9e\x00\x00\x01\x85IDAT(\x15\x95R\xbdj\xc2P\x14\xce5W\xa1\x8aPA\xc1\xd8*U\xc4Z\x84Jq\xc9T|\x04\x9f\xc0\x07p\xea\xd2N\x0e\x8e\x05\xd7Bq\xaac\xd7\xfa\x04Y:\x04B\x04\x03\xe9\x90\x0e\xb5 \xc5\x80\xa8h\x88?1\xe99)i\x8b\x82\xa5\x07\x0e\x9c\xfb\x9d\xef;\x7f\x09q\x1c\x87\xf1\x8c\x10\xe2o6\x9b\xa7\x99L&\xb6\xd9llUU?\x1a\x8d\x86\x06\x9c\x1f\x12\xc6\xe8\xf5z=\xdd\xeb\xf5\x1e\x16\x8b\x85\x04o\xd7\xa7\xd3\xa9\xd4\xedv\xefj\xb5Z\xdc\xe3\x11\x0c\xa02\x95$\xe9\xbeT*](\x8a\x22\x0f\x06\x83g\x96eI2\x99\xbc\xcc\xe7\xf3\xe7\xa2(\x0a<\xcf\xdf\x00\xd7v\xabW\xab\xd5\xa3\xd1h$j\x9a\xf6\x08\xe3\x05\xbcj\x10\x87\xfa\xfd\xfe\x93\xae\xebB\xb9\x5c>D\xdc\x87\xf3'\x12\x09?\xa5\x94\x9dL&o\x00\xae\x10C\x83\xd8\x98\xcdf\xef\xd0-\x90\xcb\xe5(b\xae@\x96ew4X\x94\xe2x\xbf}\xbd^\x13\xc0\x9dN\xa7CP@\xe0\x0a'\x95J\xe5\xb6P(d\xa1\x831\x1e\x8fu\x10`\xce\x1d7\x12\x89\xc4\xc3\xe1\xf0\x01\x1c\xe4\xa5\xddn_S\x8e\xe3\x8e\x8b\xc5b\x16\x09\xd1h4\x04\x9e\xc6x\xdb\x80s\x96J\xa5b>\xcb\xb2lh\xbb\x9d\xdfy\x9b\xa6\xc9\xc0.\x8e\xbb\xc3Nv\x0f\xf0\x7f\x81m\xdb_\x1b\xee\xa9\x8a)\xef\x10\xbe\xf9|\xbeZ.\x97\x7f\xd0\x19\x06~\x19\xc60\x0c\x8b\xb6Z-%\x18\x0c^\xc1\xf9\x02\xdb*\xf8\x98\xdf\xd0p84\x05Ax\xfd\x04\xd4\x0a\xc8\x14u\xd45\x02\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = "\x00\x03\x00\x00x\xc3\x00r\x00e\x00s\x00\x0f\x0f\xcd4'\x00t\x00h\x00u\x00m\x00b\x00_\x00e\x00m\x00p\x00t\x00y\x00.\x00p\x00n\x00g\x00\x10\x01\x0e\x1f\xa7\x00s\x00a\x00v\x00e\x00_\x00a\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x0b\x05r\xa1'\x00p\x00a\x00d\x00l\x00o\x00c\x00k\x00.\x00p\x00n\x00g"
qt_resource_struct = "\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x000\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x06\x00\x00\x00V\x00\x00\x00\x00\x00\x01\x00\x00\x14\x8f\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    """Register this module's embedded Qt resources (three PNG icons).

    Passes the module-level blobs to the Qt resource system: 0x01 is the
    resource-format version expected by qRegisterResourceData, followed by
    the resource tree structure, the resource names, and the image payload.
    NOTE: this file is generated by the PySide resource compiler (see the
    header warning) — do not edit the data blobs by hand.
    """
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded Qt resources registered by qInitResources().

    Mirror of qInitResources(): passes the same version tag (0x01) and the
    same three module-level blobs so Qt can remove exactly this resource set.
    """
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register the resources as an import-time side effect — the standard layout
# emitted by the PySide resource compiler (per the generated-file header above).
qInitResources()
| 836.238095
| 16,475
| 0.741017
| 3,895
| 17,561
| 3.330937
| 0.272657
| 0.044396
| 0.03191
| 0.016649
| 0.12078
| 0.113843
| 0.110683
| 0.108602
| 0.097194
| 0.093032
| 0
| 0.240844
| 0.012642
| 17,561
| 20
| 16,476
| 878.05
| 0.507411
| 0.008485
| 0
| 0
| 0
| 0.333333
| 0.978684
| 0.949555
| 0
| 0
| 0.00046
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e5df3f16d7207e8d6758b11f9d70c35575ad0891
| 4,030
|
py
|
Python
|
src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_spectral_ops.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | 1
|
2021-04-09T15:55:35.000Z
|
2021-04-09T15:55:35.000Z
|
src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_spectral_ops.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_spectral_ops.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
# Auto-generated registry of TensorFlow spectral (FFT-family) operations.
# Every handler below follows the same pattern: it registers itself with
# TensorFlowConverter for exactly one op type and unconditionally raises
# NotImplementedError, signalling that the op is not supported yet.
# Keep the handlers uniform — this file is generated (see module name
# "gen_spectral_ops"); edit the generator rather than individual stubs.
@TensorFlowConverter.register_handler("BatchFFT")
def batch_fft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchFFT2D")
def batch_fft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchFFT3D")
def batch_fft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchIFFT")
def batch_ifft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchIFFT2D")
def batch_ifft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchIFFT3D")
def batch_ifft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FFT")
def fft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FFT2D")
def fft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FFT3D")
def fft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IFFT")
def ifft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IFFT2D")
def ifft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IFFT3D")
def ifft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IRFFT")
def irfft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IRFFT2D")
def irfft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("IRFFT3D")
def irfft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("RFFT")
def rfft_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("RFFT2D")
def rfft2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("RFFT3D")
def rfft3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
| 42.87234
| 90
| 0.800496
| 461
| 4,030
| 6.802603
| 0.10846
| 0.241071
| 0.264031
| 0.212372
| 0.887117
| 0.887117
| 0.887117
| 0.887117
| 0.887117
| 0.887117
| 0
| 0.006524
| 0.087097
| 4,030
| 93
| 91
| 43.333333
| 0.845882
| 0
| 0
| 0.321429
| 0
| 0
| 0.334243
| 0.093797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.321429
| false
| 0
| 0.035714
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
00bf01d0081cf21b9b584a14f75c742856ed45f7
| 220
|
py
|
Python
|
tests/__main__.py
|
dipietrantonio/pdf4py
|
1b09fe720a30902d295db85c295a8758768840ad
|
[
"MIT"
] | null | null | null |
tests/__main__.py
|
dipietrantonio/pdf4py
|
1b09fe720a30902d295db85c295a8758768840ad
|
[
"MIT"
] | null | null | null |
tests/__main__.py
|
dipietrantonio/pdf4py
|
1b09fe720a30902d295db85c295a8758768840ad
|
[
"MIT"
] | 1
|
2021-12-22T07:46:34.000Z
|
2021-12-22T07:46:34.000Z
|
import unittest
from .functional_tests import *
from .unit_tests import *
from .aes_unit_tests import *
from .decrypt_unit_tests import *
from .decoders_unit_tests import *
if __name__ == "__main__":
    # Discover and run every TestCase pulled in by the wildcard imports above
    # when the test package is executed directly (e.g. `python -m tests`).
    unittest.main()
| 24.444444
| 34
| 0.777273
| 30
| 220
| 5.166667
| 0.4
| 0.354839
| 0.387097
| 0.367742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140909
| 220
| 9
| 35
| 24.444444
| 0.820106
| 0
| 0
| 0
| 0
| 0
| 0.036199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dab31c1794369e517fdc38e61ba79be04bd171ef
| 7,173
|
py
|
Python
|
join.py
|
AGH-Narzedzia-Informatyczne/Project_Labyrinth
|
317f744500fb73f9b8961ec725904cae00aadb92
|
[
"MIT"
] | 1
|
2020-12-16T14:32:23.000Z
|
2020-12-16T14:32:23.000Z
|
join.py
|
Pandoors/Project_Labyrinth
|
317f744500fb73f9b8961ec725904cae00aadb92
|
[
"MIT"
] | 5
|
2020-11-22T19:34:42.000Z
|
2020-12-10T23:57:38.000Z
|
join.py
|
Pandoors/Project_Labyrinth
|
317f744500fb73f9b8961ec725904cae00aadb92
|
[
"MIT"
] | 5
|
2020-12-16T14:31:48.000Z
|
2020-12-16T14:32:17.000Z
|
# PREVIOUS UI. It was ultimately rejected and we switched to Tkinter.
# import labirynth_generator as bk_lab
# import PyMazeDFS
# import PyMazeBFS
# import DFS_generator.mazeToGraphic as mazeToGraphic
# import prim as prim
# import binary_tree as bintree
#
# import PySimpleGUI as sg
#
# sg.theme('Topanga')
#
#
# def bartek():
# layout = [[sg.Text("Labirynt generwany algorytmem BFS wersja 2", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj wymiar N labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt NxN", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno'):
# break
# if event in (None, 'Generuj labirynt NxN'):
# bk_lab.generate(int(values[0]))
# window.close()
#
#
# def hania():
# layout = [[sg.Text("Labirynt generwany algorytmem Prima", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj wymiar N labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt NxN", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno'):
# break
# if event in (None, 'Generuj labirynt NxN'):
# prim.hania_prim(int(values[0]))
# window.close()
#
#
# def pawel():
# layout = [[sg.Text("Labirynt generwany algorytmem BFS wersja 2", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj wymiar N labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt NxN", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno'):
# break
# if event in (None, 'Generuj labirynt NxN'):
# bintree.pawel_tree(int(values[0]))
# window.close()
#
#
# def lukasz_hex():
# layout = [[sg.Text("Labirynt generowany algorytmem DFS ", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj promien labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno'):
# break
# if event in (None, 'Generuj labirynt'):
# mazeToGraphic.generate(int(values[0]))
# window.close()
#
#
# def konrad_dfs():
# layout = [[sg.Text("Labirynt generowany algorytmem DFS", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj wymiar N labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt NxN", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno'):
# break
# if event in (None, 'Generuj labirynt NxN'):
# PyMazeDFS.generate(int(values[0]))
# window.close()
#
#
# def konrad_bfs():
# layout = [[sg.Text("Labirynt generowany algorytmem BFS", justification='center', font='Helvetica 15')],
# [sg.Text('Podaj wymiar N labiryntu:', justification='center', font='Helvetica 15'),
# sg.InputText(size=(8, 5), font='Helvetica 18')],
# [sg.Button("Generuj labirynt NxN", size=(15, 1), font='Helvetica 18')],
# [sg.Button("Zamknij okno", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Generator labiryntów', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zamknij okno') or None:
# break
# if event in (None, 'Generuj labirynt NxN'):
# PyMazeBFS.generate(int(values[0]))
# window.close()
#
#
# layout = [
# [sg.Text("Witaj! Wybierz jeden z poniższych generatorów labiryntów.", justification='center', font='Helvetica 15')],
# [sg.Text("Naciśnięcie jednego z poniższych przycisków otworzy nowe okno konfiguracji", justification='center',
# font='Helvetica 15')],
# [sg.Button('DFS', size=(15, 1), font='Helvetica 20')],
# [sg.Button('BFS', size=(15, 1), font='Helvetica 20')],
# [sg.Button('BFS wersja 2', size=(15, 1), font='Helvetica 20')],
# [sg.Button('HEX DFS', size=(15, 1), font='Helvetica 20')],
# [sg.Button('Pawel', size=(15, 1), font='Helvetica 20')],
# [sg.Button('Prim', size=(15, 1), font='Helvetica 20')],
# [sg.Button("Zakończ program", size=(10, 1), font='Helvetica 18')]
# ]
# # Create the Window
# window = sg.Window('Wybór generatora', layout, element_justification='c')
# # Event Loop to process "events"
# while True:
# event, values = window.read()
# print(event, values)
# if event in (None, 'Zakończ program') or None:
# break
#
# if event in (None, 'DFS'):
# konrad_dfs()
# if event in (None, 'BFS wersja 2'):
# bartek()
# if event in (None, 'BFS'):
# konrad_bfs()
# if event in (None, 'HEX DFS'):
# lukasz_hex()
# if event in (None, 'Pawel'):
# pawel()
# if event in (None, 'Prim'):
# hania()
# window.close()
| 42.443787
| 122
| 0.576467
| 833
| 7,173
| 4.939976
| 0.129652
| 0.123208
| 0.069259
| 0.060024
| 0.838882
| 0.831106
| 0.784447
| 0.75966
| 0.710571
| 0.685784
| 0
| 0.029478
| 0.257493
| 7,173
| 168
| 123
| 42.696429
| 0.743147
| 0.951206
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dabfeb1dd189f05dc07e5e35a2c9e0bf5df22970
| 4,883
|
py
|
Python
|
matroids/construct/rank_function.py
|
PotassiumIodide/matroid-theory-in-python
|
51c06ba728c9d9002234fe98b1bc84bffb86a0cb
|
[
"MIT"
] | 2
|
2020-11-27T09:51:49.000Z
|
2021-11-10T07:16:34.000Z
|
matroids/construct/rank_function.py
|
PotassiumIodide/matroid-theory-in-python
|
51c06ba728c9d9002234fe98b1bc84bffb86a0cb
|
[
"MIT"
] | 1
|
2020-11-16T07:22:29.000Z
|
2020-11-16T07:22:29.000Z
|
matroids/construct/rank_function.py
|
PotassiumIodide/matroid-theory-in-python
|
51c06ba728c9d9002234fe98b1bc84bffb86a0cb
|
[
"MIT"
] | null | null | null |
from typing import Callable, TypeVar
from matroids.core.set_operator import powset
import matroids.construct.independent_sets as independent_sets
import matroids.construct.circuits as circuits
T = TypeVar('T')
def from_independent_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Build the rank function of a matroid given by its independent sets.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid (E, Is) given by its
            independent sets Is.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    _, independents = matroid

    # r(X) = max{|I| : I ∈ Is, I ⊆ X}, for every X ⊆ E.
    def rank(X: set[T]) -> int:
        return max(len(I) for I in independents if I <= X)

    return rank
def from_dependent_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by dependent sets.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by dependent sets.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_dependent_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_bases_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by bases.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by bases.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_bases_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_circuits_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by circuits.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by circuits.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_circuits_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_nulity_matroid(matroid: tuple[set[T], Callable[[set[T]], int]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by a nullity function.

    Args:
        matroid (tuple[set[T], Callable[[set[T]], int]]): A matroid (E, n) defined
            by a nullity function n mapping subsets of E to ints.
            (Doc fix: the original docstring declared n as returning set[T],
            but the code uses it as an int-valued function.)

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, n = matroid
    # r(X) = |X| - n(X), for every X ⊆ E.
    return lambda X: len(X) - n(X)
def from_closure_matroid(matroid: tuple[set[T], Callable[[set[T]], set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by a closure function.

    Args:
        matroid (tuple[set[T], Callable[[set[T]], set[T]]]): A matroid (E, cl)
            defined by a closure function cl.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, cl = matroid

    # r(X) = min{|I| : X ⊆ cl(I)}, for every X ⊆ E.
    def rank(X: set[T]) -> int:
        return min(len(I) for I in powset(E) if X <= cl(I))

    return rank
def from_flats_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by flats.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by flats.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_flats_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_open_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by open sets.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by open sets.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_open_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_hyperplanes_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by hyperplanes.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by hyperplanes.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_hyperplanes_matroid(matroid)
    return from_independent_matroid((E, independents))
def from_spanning_matroid(matroid: tuple[set[T], list[set[T]]]) -> Callable[[set[T]], int]:
    """Construct a rank function from a matroid defined by spanning sets.

    Args:
        matroid (tuple[set[T], list[set[T]]]): A matroid defined by spanning sets.

    Returns:
        Callable[[set[T]], int]: The rank function of the given matroid.
    """
    E, _ = matroid
    # Reduce to the independent-set characterization and reuse its rank function.
    independents = independent_sets.from_spanning_matroid(matroid)
    return from_independent_matroid((E, independents))
| 34.631206
| 104
| 0.651853
| 723
| 4,883
| 4.325035
| 0.080221
| 0.080588
| 0.092101
| 0.100736
| 0.822514
| 0.790534
| 0.769108
| 0.755357
| 0.730413
| 0.721458
| 0
| 0
| 0.203563
| 4,883
| 141
| 105
| 34.631206
| 0.801749
| 0.491911
| 0
| 0.2
| 0
| 0
| 0.000451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.114286
| 0
| 0.685714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
dae39b1cd0672fc282c2e533d12eeda144d4116e
| 21,245
|
py
|
Python
|
Code/train.py
|
skywolf829/ASMRSR
|
3faae231864abe9df6a06a444bd9610368090413
|
[
"MIT"
] | 4
|
2021-04-12T02:57:31.000Z
|
2021-06-29T09:34:34.000Z
|
Code/train.py
|
skywolf829/ASMRSR
|
3faae231864abe9df6a06a444bd9610368090413
|
[
"MIT"
] | null | null | null |
Code/train.py
|
skywolf829/ASMRSR
|
3faae231864abe9df6a06a444bd9610368090413
|
[
"MIT"
] | null | null | null |
from utility_functions import to_pixel_samples, to_img, PSNR, make_coord, make_residual_weight_grid
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import time
import torch.optim as optim
import os
from models import save_model
import numpy as np
class Trainer():
    """Super-resolution trainer with single-device and DistributedDataParallel loops."""

    def __init__(self, opt):
        # opt: dict of hyperparameters and runtime settings shared by all loops.
        self.opt = opt

    def train_distributed(self, rank, model, opt, dataset):
        """Per-process DDP training loop; `rank` selects the CUDA device.

        Only rank 0 writes TensorBoard logs and saves checkpoints.
        """
        opt['device'] = "cuda:" + str(rank)
        dist.init_process_group(
            backend='nccl',
            init_method='env://',
            world_size=self.opt['num_nodes'] * self.opt['gpus_per_node'],
            rank=rank
        )
        model = model.to(rank)
        model = DDP(model, device_ids=[rank])
        model_optim = optim.Adam(model.parameters(), lr=self.opt["g_lr"],
                                 betas=(self.opt["beta_1"], self.opt["beta_2"]))
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800], gamma=self.opt['gamma'])
        if(rank == 0):
            writer = SummaryWriter(os.path.join('tensorboard', opt['save_name']))
        start_time = time.time()
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset, num_replicas=opt['num_nodes']*opt['gpus_per_node'], rank=rank)
        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=False,  # the DistributedSampler already partitions the data
            num_workers=opt["num_workers"],
            pin_memory=True,
            sampler=train_sampler
        )
        L1loss = nn.L1Loss().to(opt["device"])
        step = 0
        for epoch in range(opt['epoch_number'], opt['epochs']):
            opt["epoch_number"] = epoch
            for batch_num, real_hr in enumerate(dataloader):
                model.zero_grad()
                real_hr = real_hr.to(self.opt['device'])
                if(rank == 0):
                    hr_im = torch.from_numpy(np.transpose(to_img(real_hr, self.opt['mode']),
                        [2, 0, 1])[0:3]).unsqueeze(0)
                real_shape = real_hr.shape
                # Continuous upscaling models train on a random per-batch scale
                # factor; otherwise the fixed spatial downscale ratio is used.
                if(model.upscaling_model.continuous):
                    scale_factor = torch.rand([1], device=real_hr.device, dtype=real_hr.dtype) * \
                        (self.opt['scale_factor_end'] - self.opt['scale_factor_start']) + \
                        self.opt['scale_factor_start']
                else:
                    scale_factor = (1/self.opt['spatial_downscale_ratio'])
                real_lr = F.interpolate(real_hr, scale_factor=(1/scale_factor),
                    mode="bilinear" if "2D" in self.opt['mode'] else "trilinear",
                    align_corners=False, recompute_scale_factor=False)
                if(rank == 0):
                    lr_im = torch.from_numpy(np.transpose(to_img(real_lr, self.opt['mode']),
                        [2, 0, 1])[0:3]).unsqueeze(0)
                    lr_im = F.interpolate(lr_im, size=hr_im.shape[2:], mode='nearest')
                if(model.upscaling_model.continuous):
                    hr_coords, real_hr = to_pixel_samples(real_hr, flatten=False)
                    cell_sizes = torch.ones_like(hr_coords)
                    for i in range(cell_sizes.shape[-1]):
                        cell_sizes[:, :, i] *= 2 / real_shape[2+i]
                    lr_upscaled = model(real_lr, hr_coords, cell_sizes)
                    if("2D" in self.opt['mode']):
                        lr_upscaled = lr_upscaled.permute(2, 0, 1).unsqueeze(0)
                    else:
                        lr_upscaled = lr_upscaled.permute(3, 0, 1, 2).unsqueeze(0)
                    # Flatten to (num_pixels, channels) to match real_hr samples.
                    lr_upscaled = torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0)
                else:
                    lr_upscaled = model(real_lr)
                if(rank == 0):
                    sr_im = torch.from_numpy(np.transpose(to_img(lr_upscaled,
                        self.opt['mode']), [2, 0, 1])[0:3]).unsqueeze(0)
                L1 = L1loss(lr_upscaled, real_hr)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(lr_upscaled, real_hr)
                if(rank == 0 and step % self.opt['save_every'] == 0):
                    print("Epoch %i batch %i, sf: x%0.02f, L1: %0.04f, PSNR (dB): %0.02f" % \
                        (epoch, batch_num, scale_factor, L1.item(), psnr.item()))
                    writer.add_scalar('L1', L1.item(), step)
                    writer.add_images("LR, SR, HR", torch.cat([lr_im, sr_im, hr_im]), global_step=step)
                step += 1
            if(rank == 0 and epoch % self.opt['save_every'] == 0):
                save_model(model, self.opt)
                print("Saved model")
        end_time = time.time()
        # BUG FIX: elapsed time was computed as start_time - end_time (always negative).
        total_time = end_time - start_time
        if(rank == 0):
            print("Time to train: " + str(total_time))
            save_model(model, self.opt)
            print("Saved model")

    def train_single(self, model, dataset):
        """Single-GPU/CPU training loop; optionally freezes the feature extractor
        when fine-tuning (only the upscaling model is optimized then)."""
        model = model.to(self.opt['device'])
        if not self.opt['fine_tuning']:
            model_optim = optim.Adam(model.parameters(), lr=self.opt["g_lr"],
                                     betas=(self.opt["beta_1"], self.opt["beta_2"]))
        else:
            # Fine-tuning: freeze the feature extractor, train only the upscaler.
            for param in model.feature_extractor.parameters():
                param.requires_grad = False
            model_optim = optim.Adam(model.upscaling_model.parameters(), lr=self.opt["g_lr"],
                                     betas=(self.opt["beta_1"], self.opt["beta_2"]))
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800], gamma=self.opt['gamma'])
        writer = SummaryWriter(os.path.join('tensorboard', self.opt['save_name']))
        start_time = time.time()
        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=True,
            num_workers=self.opt["num_workers"],
            pin_memory=True
        )
        L1loss = nn.L1Loss().to(self.opt["device"])
        step = 0
        for epoch in range(self.opt['epoch_number'], self.opt['epochs']):
            self.opt["epoch_number"] = epoch
            for batch_num, real_hr in enumerate(dataloader):
                model.zero_grad()
                real_hr = real_hr.to(self.opt['device'])
                hr_im = torch.from_numpy(np.transpose(to_img(real_hr, self.opt['mode']),
                    [2, 0, 1])[0:3]).unsqueeze(0)
                real_shape = real_hr.shape
                if(model.upscaling_model.continuous):
                    scale_factor = torch.rand([1], device=real_hr.device, dtype=real_hr.dtype) * \
                        (self.opt['scale_factor_end'] - self.opt['scale_factor_start']) + \
                        self.opt['scale_factor_start']
                else:
                    scale_factor = (1/self.opt['spatial_downscale_ratio'])
                real_lr = F.interpolate(real_hr, scale_factor=(1/scale_factor),
                    mode="bilinear" if "2D" in self.opt['mode'] else "trilinear",
                    align_corners=False, recompute_scale_factor=False)
                lr_im = torch.from_numpy(np.transpose(to_img(real_lr, self.opt['mode']),
                    [2, 0, 1])[0:3]).unsqueeze(0)
                lr_im = F.interpolate(lr_im, mode='nearest', size=hr_im.shape[2:])
                if(model.upscaling_model.continuous):
                    hr_coords, real_hr = to_pixel_samples(real_hr, flatten=False)
                    cell_sizes = torch.ones_like(hr_coords)
                    for i in range(cell_sizes.shape[-1]):
                        cell_sizes[:, :, i] *= 2 / real_shape[2+i]
                    lr_upscaled = model(real_lr, hr_coords, cell_sizes)
                    if("2D" in self.opt['mode']):
                        lr_upscaled = lr_upscaled.permute(2, 0, 1).unsqueeze(0)
                    else:
                        lr_upscaled = lr_upscaled.permute(3, 0, 1, 2).unsqueeze(0)
                    sr_im = torch.from_numpy(np.transpose(to_img(lr_upscaled,
                        self.opt['mode']), [2, 0, 1])[0:3]).unsqueeze(0)
                    # Flatten to (num_pixels, channels) to match real_hr samples.
                    lr_upscaled = torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0)
                else:
                    lr_upscaled = model(real_lr)
                    sr_im = torch.from_numpy(np.transpose(to_img(lr_upscaled,
                        self.opt['mode']), [2, 0, 1])[0:3]).unsqueeze(0)
                L1 = L1loss(lr_upscaled, real_hr)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(lr_upscaled, real_hr)
                if(step % self.opt['save_every'] == 0):
                    print("Epoch %i batch %i, sf: x%0.02f, L1: %0.04f, PSNR (dB): %0.02f" % \
                        (epoch, batch_num, scale_factor, L1.item(), psnr.item()))
                    writer.add_scalar('L1', L1.item(), step)
                    writer.add_images("LR, SR, HR", torch.cat([lr_im, sr_im, hr_im]), global_step=step)
                step += 1
            if(epoch % self.opt['save_every'] == 0):
                save_model(model, self.opt)
                print("Saved model")
        end_time = time.time()
        # BUG FIX: elapsed time was computed as start_time - end_time (always negative).
        total_time = end_time - start_time
        print("Time to train: " + str(total_time))
        save_model(model, self.opt)
        print("Saved model")

    def train(self, model, dataset):
        """Entry point; dispatches to distributed or single-device training."""
        torch.manual_seed(0b10101010101010101010101010101010)
        if(self.opt['train_distributed']):
            print("Training distributed across " + str(self.opt['gpus_per_node']) + " GPUs")
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '29500'
            mp.spawn(self.train_distributed,
                args=(model, self.opt, dataset),
                nprocs=self.opt['gpus_per_node'],
                join=True)
        else:
            print("Training on " + self.opt['device'])
            self.train_single(model, dataset)
class ImplicitNetworkTrainer():
    """Trains an implicit (coordinate -> value) network with L1 loss."""

    def __init__(self, opt):
        # opt: dict of hyperparameters and runtime settings.
        self.opt = opt

    def train_single(self, model, dataset):
        """Single-device training loop over (coords, values) minibatches."""
        model = model.to(self.opt['device'])
        model_optim = optim.Adam(model.parameters(), lr=self.opt["g_lr"],
                                 betas=(self.opt["beta_1"], self.opt["beta_2"]))
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800], gamma=self.opt['gamma'])
        writer = SummaryWriter(os.path.join('tensorboard', self.opt['save_name']))
        start_time = time.time()
        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=True,
            num_workers=self.opt["num_workers"],
            pin_memory=True,
            batch_size=self.opt['minibatch']
        )
        L1loss = nn.L1Loss().to(self.opt["device"])
        step = 0
        for epoch in range(self.opt['epoch_number'], self.opt['epochs']):
            self.opt["epoch_number"] = epoch
            for batch_num, inout in enumerate(dataloader):
                model.zero_grad()
                in_coords, out_vals = inout
                in_coords = in_coords.to(self.opt['device'])
                out_vals = out_vals.to(self.opt['device'])
                recovered_data = model(in_coords)
                L1 = L1loss(recovered_data, out_vals)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(recovered_data, out_vals)
                if(step % self.opt['save_every'] == 0):
                    print("Epoch %i batch %i, sf: L1: %0.04f, PSNR (dB): %0.02f" % \
                        (epoch, batch_num, L1.item(), psnr.item()))
                    writer.add_scalar('L1', L1.item(), step)
                step += 1
            if(epoch % self.opt['save_every'] == 0):
                save_model(model, self.opt)
                print("Saved model")
        end_time = time.time()
        # BUG FIX: elapsed time was computed as start_time - end_time (always negative).
        total_time = end_time - start_time
        print("Time to train: " + str(total_time))
        save_model(model, self.opt)
        print("Saved model")

    def train(self, model, dataset):
        """Entry point; dispatches to distributed or single-device training."""
        torch.manual_seed(0b10101010101010101010101010101010)
        if(self.opt['train_distributed']):
            # NOTE(review): this class defines no train_distributed method, so
            # this branch raises AttributeError if ever taken — confirm intent.
            print("Training distributed across " + str(self.opt['gpus_per_node']) + " GPUs")
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '29500'
            mp.spawn(self.train_distributed,
                args=(model, self.opt, dataset),
                nprocs=self.opt['gpus_per_node'],
                join=True)
        else:
            print("Training on " + self.opt['device'])
            self.train_single(model, dataset)
class TemporalTrainer():
    """Trains a continuous upscaling model; the single-device loop downsamples
    only the last (temporal) axis of the volume."""

    def __init__(self, opt):
        # opt: dict of hyperparameters and runtime settings.
        self.opt = opt

    def train_distributed(self, rank, model, opt, dataset):
        """Per-process DDP training loop; `rank` selects the CUDA device.

        Only rank 0 writes TensorBoard logs and saves checkpoints.
        """
        opt['device'] = "cuda:" + str(rank)
        dist.init_process_group(
            backend='nccl',
            init_method='env://',
            world_size=self.opt['num_nodes'] * self.opt['gpus_per_node'],
            rank=rank
        )
        model = model.to(rank)
        model = DDP(model, device_ids=[rank])
        model_optim = optim.Adam(model.parameters(), lr=self.opt["g_lr"],
                                 betas=(self.opt["beta_1"], self.opt["beta_2"]))
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800], gamma=self.opt['gamma'])
        if(rank == 0):
            writer = SummaryWriter(os.path.join('tensorboard', opt['save_name']))
        start_time = time.time()
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset, num_replicas=opt['num_nodes']*opt['gpus_per_node'], rank=rank)
        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=False,  # the DistributedSampler already partitions the data
            num_workers=opt["num_workers"],
            pin_memory=True,
            sampler=train_sampler
        )
        L1loss = nn.L1Loss().to(opt["device"])
        step = 0
        for epoch in range(opt['epoch_number'], opt['epochs']):
            opt["epoch_number"] = epoch
            for batch_num, real_hr in enumerate(dataloader):
                model.zero_grad()
                real_hr = real_hr.to(self.opt['device'])
                real_shape = real_hr.shape
                scale_factor = torch.rand([1], device=real_hr.device, dtype=real_hr.dtype) * \
                    (self.opt['scale_factor_end'] - self.opt['scale_factor_start']) + \
                    self.opt['scale_factor_start']
                real_lr = F.interpolate(real_hr, scale_factor=(1/scale_factor),
                    mode="bilinear" if "2D" in self.opt['mode'] else "trilinear",
                    align_corners=False, recompute_scale_factor=False)
                hr_coords, real_hr = to_pixel_samples(real_hr, flatten=False)
                cell_sizes = torch.ones_like(hr_coords)
                for i in range(cell_sizes.shape[-1]):
                    cell_sizes[:, :, i] *= 2 / real_shape[2+i]
                lr_upscaled = model(real_lr, hr_coords, cell_sizes)
                lr_upscaled = lr_upscaled.permute(3, 0, 1, 2).unsqueeze(0)
                L1 = L1loss(torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0), real_hr)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0), real_hr)
                if(rank == 0 and step % self.opt['save_every'] == 0):
                    print("Epoch %i batch %i, sf: x%0.02f, L1: %0.04f, PSNR (dB): %0.02f" % \
                        (epoch, batch_num, scale_factor, L1.item(), psnr.item()))
                    writer.add_scalar('L1', L1.item(), step)
                    # BUG FIX: removed writer.add_images(torch.cat([lr_im, sr_im,
                    # hr_im])) — those tensors are never created in this method,
                    # so the call raised NameError on the first logging step.
                step += 1
            if(rank == 0 and epoch % self.opt['save_every'] == 0):
                save_model(model, self.opt)
                print("Saved model")
        end_time = time.time()
        # BUG FIX: elapsed time was computed as start_time - end_time (always negative).
        total_time = end_time - start_time
        if(rank == 0):
            print("Time to train: " + str(total_time))
            save_model(model, self.opt)
            print("Saved model")

    def train_single(self, model, dataset):
        """Single-device training loop; shrinks only the temporal (last) axis."""
        model = model.to(self.opt['device'])
        model_optim = optim.Adam(model.parameters(), lr=self.opt["g_lr"],
                                 betas=(self.opt["beta_1"], self.opt["beta_2"]))
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800], gamma=self.opt['gamma'])
        writer = SummaryWriter(os.path.join('tensorboard', self.opt['save_name']))
        start_time = time.time()
        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=True,
            num_workers=self.opt["num_workers"],
            pin_memory=True
        )
        L1loss = nn.L1Loss().to(self.opt["device"])
        step = 0
        for epoch in range(self.opt['epoch_number'], self.opt['epochs']):
            self.opt["epoch_number"] = epoch
            for batch_num, real_hr in enumerate(dataloader):
                model.zero_grad()
                real_hr = real_hr.to(self.opt['device'])
                real_shape = real_hr.shape
                scale_factor = torch.rand([1], device=real_hr.device, dtype=real_hr.dtype) * \
                    (self.opt['scale_factor_end'] - self.opt['scale_factor_start']) + \
                    self.opt['scale_factor_start']
                # Downsample only the last dimension by the sampled scale factor.
                s = [real_hr.shape[2], real_hr.shape[3], round((real_hr.shape[4]/scale_factor).item())]
                real_lr = F.interpolate(real_hr, size=s, mode='trilinear', align_corners=False)
                hr_coords, real_hr = to_pixel_samples(real_hr, flatten=False)
                cell_sizes = torch.ones_like(hr_coords)
                for i in range(cell_sizes.shape[-1]):
                    cell_sizes[:, :, i] *= 2 / real_shape[2+i]
                lr_upscaled = model(real_lr, hr_coords, cell_sizes)
                lr_upscaled = lr_upscaled.permute(3, 0, 1, 2).unsqueeze(0)
                L1 = L1loss(torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0), real_hr)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(torch.flatten(lr_upscaled, start_dim=1, end_dim=-1).permute(1, 0), real_hr)
                # BUG FIX: condition was `... == 0 or True`, a debug leftover
                # that printed on every single step.
                if(step % self.opt['save_every'] == 0):
                    print("Epoch %i batch %i, sf: x%0.02f, L1: %0.04f, PSNR (dB): %0.02f" % \
                        (epoch, batch_num, scale_factor, L1.item(), psnr.item()))
                step += 1
            if(epoch % self.opt['save_every'] == 0):
                save_model(model, self.opt)
                print("Saved model")
        end_time = time.time()
        # BUG FIX: elapsed time was computed as start_time - end_time (always negative).
        total_time = end_time - start_time
        print("Time to train: " + str(total_time))
        save_model(model, self.opt)
        print("Saved model")

    def train(self, model, dataset):
        """Entry point; dispatches to distributed or single-device training."""
        torch.manual_seed(0b10101010101010101010101010101010)
        if(self.opt['train_distributed']):
            print("Training distributed across " + str(self.opt['gpus_per_node']) + " GPUs")
            os.environ['MASTER_ADDR'] = '127.0.0.1'
            os.environ['MASTER_PORT'] = '29500'
            mp.spawn(self.train_distributed,
                args=(model, self.opt, dataset),
                nprocs=self.opt['gpus_per_node'],
                join=True)
        else:
            print("Training on " + self.opt['device'])
            self.train_single(model, dataset)
| 45.885529
| 105
| 0.522664
| 2,496
| 21,245
| 4.244792
| 0.080529
| 0.081265
| 0.018405
| 0.016989
| 0.914394
| 0.909108
| 0.903634
| 0.903634
| 0.903634
| 0.899387
| 0
| 0.032398
| 0.349117
| 21,245
| 463
| 106
| 45.885529
| 0.733801
| 0.007625
| 0
| 0.890625
| 0
| 0.013021
| 0.09749
| 0.002182
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028646
| false
| 0
| 0.033854
| 0
| 0.070313
| 0.067708
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dafda4e1c0d82ff6427d77ee3473f5fe21b464f7
| 6,248
|
py
|
Python
|
system/indy-node-tests/TestViewChangeSuite.py
|
ashcherbakov/indy-test-automation
|
23fc7bebee87da6cbf16aa58e2d29d38cfc642cd
|
[
"Apache-2.0"
] | null | null | null |
system/indy-node-tests/TestViewChangeSuite.py
|
ashcherbakov/indy-test-automation
|
23fc7bebee87da6cbf16aa58e2d29d38cfc642cd
|
[
"Apache-2.0"
] | null | null | null |
system/indy-node-tests/TestViewChangeSuite.py
|
ashcherbakov/indy-test-automation
|
23fc7bebee87da6cbf16aa58e2d29d38cfc642cd
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import asyncio
from system.utils import *
@pytest.mark.usefixtures('docker_setup_and_teardown')
@pytest.mark.usefixtures('check_no_failures_fixture')
class TestViewChangeSuite:
    # View-change scenarios for an Indy pool: trigger primary changes by
    # restarting or demoting nodes and verify the pool stays in sync and
    # functional afterwards.  All fixtures come from the project's conftest.

    @pytest.mark.asyncio
    async def test_vc_by_restart_primary(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Stop the current primary's service, expect a view change, then bring
        # the node back and verify the pool re-syncs and stays functional.
        trustee_did, _ = get_default_trustee
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        primary_before, _, _ = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary_before)
        p1.stop_service()
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary_before)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        p1.start_service()
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.skip('INDY-2023')
    @pytest.mark.asyncio
    async def test_vc_by_demotion_primary(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Demote the current primary, expect a view change, then promote it
        # back and expect another view change before final health checks.
        trustee_did, _ = get_default_trustee
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        primary_before, primary_alias, primary_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        await eventually(demote_node, pool_handler, wallet_handler, trustee_did, primary_alias, primary_did)
        primary_next = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary_before)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        await eventually(promote_node, pool_handler, wallet_handler, trustee_did, primary_alias, primary_did)
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary_next)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.asyncio
    async def test_vc_by_demotion_last(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Demote/promote a fixed non-primary node (Node7 with a hard-coded DID)
        # and verify each change of the node set triggers a view change.
        _alias = 'Node7'
        _did = 'BM8dTooz5uykCbYSAAFwKNkYfT4koomBHsSWHTDtkjhW'
        trustee_did, _ = get_default_trustee
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        primary_first, _, _ = await get_primary(pool_handler, wallet_handler, trustee_did)
        await eventually(demote_node, pool_handler, wallet_handler, trustee_did, _alias, _did)
        primary_next = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary_first)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        await eventually(promote_node, pool_handler, wallet_handler, trustee_did, _alias, _did)
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary_next)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.nodes_num(8)
    @pytest.mark.asyncio
    async def test_demotion_of_backup_primary_with_restart_with_vc(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Demote the primary of backup replica 2, then restart the whole pool;
        # the demoted node is excluded from the sync check.
        # NOTE(review): replica-primary node ids appear hard-coded to the
        # initial 8-node layout — confirm against the pool config.
        R0_PRIMARY_ID = 1
        R1_PRIMARY_ID = 2
        R2_PRIMARY_ID = 3
        hosts = [NodeHost(node_id + 1) for node_id in range(nodes_num)]
        trustee_did, _ = get_default_trustee
        await check_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        pool_info = get_pool_info(str(R0_PRIMARY_ID))
        primary_r2_alias = get_node_alias(R2_PRIMARY_ID)
        primary_r2_did = get_node_did(primary_r2_alias, pool_info=pool_info)
        await eventually(demote_node, pool_handler, wallet_handler, trustee_did, primary_r2_alias, primary_r2_did)
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, str(R0_PRIMARY_ID))
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        restart_pool(hosts)
        await ensure_pool_is_in_sync(node_ids=[h.id for h in hosts if h.id != R2_PRIMARY_ID])
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.nodes_num(8)
    @pytest.mark.asyncio
    async def test_demotion_of_backup_primary_with_restart_without_vc(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Same as above, but the primary of replica 1 is stopped first so the
        # demotion happens without a view change before the pool restart.
        R0_PRIMARY_ID = 1
        R1_PRIMARY_ID = 2
        R2_PRIMARY_ID = 3
        hosts = [NodeHost(node_id + 1) for node_id in range(nodes_num)]
        trustee_did, _ = get_default_trustee
        await check_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        pool_info = get_pool_info(str(R0_PRIMARY_ID))
        host2 = hosts[R1_PRIMARY_ID - 1]
        host2.stop_service()
        primary_r2_alias = get_node_alias(R2_PRIMARY_ID)
        primary_r2_did = get_node_did(primary_r2_alias, pool_info=pool_info)
        await eventually(demote_node, pool_handler, wallet_handler, trustee_did, primary_r2_alias, primary_r2_did)
        restart_pool(hosts)
        await ensure_pool_is_in_sync(node_ids=[h.id for h in hosts if h.id != R2_PRIMARY_ID])
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.nodes_num(4)
    @pytest.mark.asyncio
    async def test_multiple_vcs(
        self, pool_handler, wallet_handler, get_default_trustee, nodes_num
    ):
        # Force ten consecutive view changes by repeatedly stopping and
        # restarting whichever node is currently primary.
        trustee_did, _ = get_default_trustee
        for i in range(10):
            primary, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
            p = NodeHost(primary)
            p.stop_service()
            await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary)
            await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
            p.start_service()
            await ensure_pool_is_in_sync(nodes_num=nodes_num)
            await ensure_state_root_hashes_are_in_sync(pool_handler, wallet_handler, trustee_did)
            await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
| 45.605839
| 114
| 0.746639
| 846
| 6,248
| 5.024823
| 0.111111
| 0.103505
| 0.159962
| 0.225829
| 0.906845
| 0.902376
| 0.895554
| 0.886144
| 0.878382
| 0.845213
| 0
| 0.010059
| 0.18854
| 6,248
| 136
| 115
| 45.941176
| 0.828402
| 0
| 0
| 0.635514
| 0
| 0
| 0.017286
| 0.015045
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028037
| 0
| 0.037383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
979da7da0ef01e9552b446bd485ddf5844b2f75e
| 10,871
|
py
|
Python
|
ImageSource.py
|
puwow/pydatalib
|
b4cb309135996cbefedbe17931bf00d0b1cdcf34
|
[
"Apache-2.0"
] | 1
|
2021-03-10T17:07:57.000Z
|
2021-03-10T17:07:57.000Z
|
ImageSource.py
|
puwow/pydatalib
|
b4cb309135996cbefedbe17931bf00d0b1cdcf34
|
[
"Apache-2.0"
] | null | null | null |
ImageSource.py
|
puwow/pydatalib
|
b4cb309135996cbefedbe17931bf00d0b1cdcf34
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
import wx
import base64
class ImageSource:
images={'address': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAArUlEQVQ4T2NkwAEEJt92+JCregCXPEycEZsCoak3GxgYmer//WVwJGQIXgMYGBkaGP79w6rmXbZ6A8hyvAYwMjEE/P/HsAGHN069y1I1BxsgPO129n8GhinoCv8xMDgwMTDgDQdGmH+x2QIygJmBYS7QcGUs8hAXEDKA6f8/RwyX/WM6AAtcgl74kKV6EF9U4o9GoBcoNIAFaIAiBS6gNCFRnBKHsAGgeCc2OwMAC51skx9KbRIAAAAASUVORK5CYII=', 'bank': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA/ElEQVQ4T2NkoBAw4tIvMvmGyT9mlnYGhv9MQDXV77JUT2BTi9UAwel30hn//+8BauCBavoGNWQCuiEoBvBOeSLMyvi9i4GRIQmHy1ayMjLmvcxUeQWThxsgNP22O+N/hqn/GRiU8QbLf4aHQHX5b3NUN4LUgQ0QnHanlZHhfxUp4QnU2Pk2S7WCUWDybQcmZob9pGiGqf33l8ER7gXBabf3M/7/d/BdtnoDzFBgyIPlhabd/g9S/CFX9YDQ1JsN/xmZ7N9nqTrCvQDxxqgBFIUBKNQZmRnqQbHw7x/TAVCggqIWFPIYbKZ/DqBY+P+XoREUK9RLB+QkJJAeAJsqzo3nhQh9AAAAAElFTkSuQmCC', 'car': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABSElEQVQ4T62SMS8EQRTH39uVKDR27xothkRCwwe4i1Ki0IlGeZnLqTQqrtLoLruoVKJSCe2dD6AhSG5OIlpuV4JEws4zI0yysSY5a5pJJu/98vu/eQg5D+bshxTAC9tzALhqh9JWzMeOv2tSgEIo9gnwVCbUzoI4fTCMBLNdzpZ+ALzGzSS68jzizBrLDwVR4kzFtZELDTHFhUBsEpIvEzywRUCXFhzCl26VrRmAv93ZA6LlngZKsh5Vxzc+DbSWApwooWvlVFFPA7/AngDxEKTsV/eijmsAMoHyY421vFA01WMpC0AArZiz8mBDlBwXmilAboOe8n8VGwOthC6sa3UdxQbT6joKJVDXkc03eoEI1ES4AzjzwEfPsiD+jpgACZeq6Ugt03xqD4q7t0OS3ryowq5sBsWwM/36TnfPK+w+BfjLDP4F8AFe/IURxOanEgAAAABJRU5ErkJggg==', 'company': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABY0lEQVQ4T6WTMUiCURSFz33/H7Sl5lJTQwpBFNQeOgoRNATRkFFRVDQ3als0RGApLg21uTQFNWV7RJupDQ0hRIGOQvpu549GLX968HhveOe795zLE/xzSU/6/H1fqDWwDNEkxAQsPucaW2MvnvZXwGCuPK/qrEHtO59eAXacgBR1TdtGorEbKXYEBLPlhKhZgcGZWp2s70QPvWqh03L6BwAC4h0BgVxpxMBNQrFEekWBWd7frMWiMTbWERDOPA1bx9mkoVVYLUGkAJUbiE2w9bxX3avYFRDIVGPiIFXfjsTDJ9UpFSxwz0BNgZDjngHaxr5xcKkQhqN3Cm0KJOcL4Eqr9i0Qd0IE/czgwhfAGOSZRZSiR4Z3zvuRP4CDUQb27Li2qjDrhKT9AYxucArTbL3GMWYpLvgEYI/eP1j5lZN48A0QY4YYYdBAgipa8Q3gGG+9lmmhyOOaNg7+tNDTj+zy6As74/QRkYgncAAAAABJRU5ErkJggg==', 'creditcard': 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAhUlEQVQ4T2NkoBAwUqifgToGKE794MbAyFxJimsYGZl672VybwG7QGHa5ytAhjYpBjAwMD64n8WjCDZAzcjiP2maIapvnTvBSB0DzBd8IeiCV98wldzP4oW4AGSAODf+CLn8+h+GL+EGKE77TNAF2MKIegZQHI0UJyRy0gBMD3XyAiUuAACjwzIRHNfLcQAAAABJRU5ErkJggg==', 'time': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABNklEQVQ4T2NkoBAwouvnnflMhPXvl3IGBkY9hv8M+mB5RoaLDAz/L/1m5un8nC71BlkPigGC0+7ZMjL82QDUIYTdYf/f/WdgCXifpXQYJg83QHjqrYT/jIzzkTX+Z2A4AHEAgwOKrf//J77NVlsAlWNgEJh+XYHpH/MVBkZGbmSF77JUGQWn3d6PbgDD//9f/zH91fmQqfkA7ALh6Xc6////X4bubJwGgFzFyNj1NlOlHGLAtNubgM71JckABobNb7NU/cAGCE27/RRISZFiAFDtM6ALpQkaAAlcpnpgNCqgWYAwAJcXgN7aBPRsFyfTh/M//vGXAtNFA1L0IbyAKxDhioGGMPz5O/8/M9N1ZDF4IOKKRuyJCSiKHo3gmMCSkHAZwIiekGAKKUrKMEMoykw4/YxHAgCZwJsRKwobkgAAAABJRU5ErkJggg==', 'pcard': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA7UlEQVQ4T6WTsQ7BUBSG/1OJF0AHk6cws1rFxGA2GKWbToiRxMBgYTH0FXgbUuIBSHr8bVQaKmlzO93e/P93/3POvQLDTwz9+ABsRwcIYGcCWvD9maxCbQSwR7rjqpvJHIsUe38uPSk72rAUx1zmtzgQNNMAHhQXJmpRVwu1Cpy+D2D0RirAsmBfpnKtONoXxTYyKtyfhAI3FRAUUL1N5ExAh4BDvgSKOw3L4hOLRxFrrtvZE9DMrpaSUTkdl70YJ3sQ1h5rvkvwuLFMAjidOv+H7MHms8/a/wFyTzJKYHyR4mONrnLu7AmD8Wt8AQSOamnPBqgAAAAAAElFTkSuQmCC', 'network': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACFElEQVQ4T4VTPW8TQRCdWZ+IEAWXmCISBYeE02J+AS6gA+GOkkTpHCEcShoc0SAKAkK2JQpiahrnHzi/gEMUkJgopqPJ6YIUCdBlhze7PudsobDS6WZ3571587FMM6vc3lvWIzGmytbGwlQzzD1rqWb/BK/S9atpEcL5JmzvVeG4hYMjYY5J5BEJbRDTU/jExLyDswdMvHLYuNbPcY4g7H6JjAQf1SyAIGNCkNuq7gqLrByuLfUU6wgudYe1TOAucuSYmS/6f/Y9oCByd7p+B59oLrue25oOa86QvDUGvgZJXaO4fVGB17yOb9Ob8j5pLC3zfHfYZ6F7/rDo8D8CSpNGZZ4XOkMv718RZxXM7K3IDdb8c3xG2UhzPmtPthQGhkP1yX4FsSrQvvqizUTADGyP09PitsTyDyZZZEPnIfu+YWpyuTMcYHPzjBSa6MozsD9BgGP47qN1NdjvkrXK6nQXvILmtCKJ0JkRsdGBwuIRyJCmJOhC2c2BqnAZ6ASqi0jVK5IeRiDyYAfsi9A5gCoIdBtqBn4SNw/C0lzWt8ohtDMeX1cTHBxjfF+A6A2TueuiMz3G3UtAf07eQrnzrQ6nnkYBIHJ10ZSMfMb/rRAnIH9uhS+UjNzCPcjow4TAKXFvotSC3BgXdcgdkLbMnqSnNfCV0IVB4imC0yufVvHpLrR3WyC5AxVfLdNlOaGN9GFl8BfN+ff2Nm6FDgAAAABJRU5ErkJggg==', 'money': 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABHUlEQVQ4T2NkoBAw4tMvMPm2A0j+Q67qAVzqcBogNPVOCQPj/26wxv+Mpe+yVXqwGYJigNDUmw3/GZnsQQqBEmDbYeA/AwPYFYyMDAfeZao2wsRRDBCcdns/isb//yAKGZnqkQ16n6XqSIwBE/79/78QpJCJkTEeSBWAfQN0CVYDkP38/z/D699/GbTZmP9lMzAx/Qc5WWjanftA7QroYQL2gsi0O8b/GP6fQfbzuyxVRuFpN13+MzACw4SxBj0AmRgYTd5kqZzFaQBEw/8WRob/B99mqe8RmnYb6HoEQDEAJIwSbQyMD95lqSiCYgUk9+sv01RWZoarwBgQxeoFmLlosUBaIIIMoTgaKU5IyIFEVlJGjyqKMhO6Ybj4AFEXjBHT0bEiAAAAAElFTkSuQmCC', 'work': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABfElEQVQ4T42SvUtCURjG3zeN2tLbXyB47Wu0aJCmqMDRLSoiiLJ7iWgPsr+glqtF6FDRIDQ0tFRL2BQOrfmxRmAe7hRUeN5eQ+1evdc8yzk8PPzO+/EgWI5ivCQA+/atmu1N8kDoIwmrhu1mJVnMshgExKsaypwZV3NKqnwHJAeEFppp93cA6gYLJOMFyH4DPYgtdcypMkdAA5LnO1x/C0119XUDUPNHRBnj2chqXL3usYXCGgBmmmYCPAMgH316V83dgNl1iI3yH/mO2IxIO0AwXdVCS70AWuW32gA5R9DH28ANoQdP//S2pvzH5UWU8tJh4mnWfNxaWGjBgCtASZXugWjWMUwEC4Awzls5cgckix3lt2CEkUGv+fy6OfnhCBg2CjHiBLpGGSDNv6+7DlExCjcc4WgXQEeobEFSkqUn3veUG4AIKh5Zi75vj9ZT+nvsSTzJ9/trQ4cs6k5b8NDXXkWfePs/B0ZxnhCWGbTCEz1Hgguhq7dOlf0Ar+SAEV5AeKYAAAAASUVORK5CYII=', 'coord': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACAElEQVQ4T4VTPUwUURCeeQuVhdyBJhoLIrsSsDAmF0pDYUgk0ki00RrYvQalkYKIhgQoFJq9O3touGijiWhFYjQx4ack8oBgIURzt4sJjeK9cd7eLt4tS9jky85738w3M2/eQ0j4WgpbGUWUBYLugEZYEohuaahtOe6O8Y20+3UcUDxJEgZST71s+3gtVyeQym/dRlJvjhw4oFrBf0FC0efbbW8jnzqBdE4+Z+KRJgnhgW9b89pO5eV9JJgLg154jjVygsDmKw69o0lhNF4sDbbuabvl5c4FVTncrQbha88x+0+qYIGJuyH5kDPNapsrG+bfTLhf5P17iQLNrpzk0h9HJAEsBTkhnIa2CabKWWs0UaApL68Lgo9MnokcYv8DhXBj37bWEgWCct2NMUB8liSAgGNlx5yo5Y7dg2rPG1+42K6YyGfP+NUNg5nD0wVc2cONv691JMBe3zHfxSurq+Bs7lvKgD83eZQMZMDl6iFSgS9TUQH89m3zU2IF6YLsBAV6jFcZJcZ3xjXGARniElbUB7Z1Wys8xsyxQwzegBA9/IBm2KGoHZpzcpSzKt+xpvU65co+gTBAoFY9pz14L0ctnHM3zQqSDJQJFnngP9j6GWY6z64MuBUEEXWWs1fW6wT0oim/3mpQwxBfIN17BE1ta3C27Qr+LezbHTtRC/8AfWqoEc4EWykAAAAASUVORK5CYII=', 'person': 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABTklEQVQ4T6WTQS8DQRiG3285ObEVJ8fdEhLREP0H/AOuwkV30Zu4bfcmbsVuXYgr/4B/UCEVCaG7RyfR5eTEfmaLVrvTLbGn2fne75l33pkh/POjTv39JS+jAJmoHgKVl5xekWmlALXkWWDaALjvs4leQbwd5HS7HRIDqI43C8Kp1BljLjD1s5+1GCDl+msM3pHbpfWaoe0mAoT9JTAOpA4Iy2Ibh4mAQdefCsEXMoACmn4ytMtEQFRU970xEf2JGI5/iW+gYD5Y0W+7hphyqotMlBfCyTbxFTEXa2b6qKMD1bkvgBQr8W5xaAfmSOFb0ziFpPTjtpun0QCorsd/udWBodd7mw72qllWqCjmsl1AZQo5X1tNl1sA9SaLe9Uhf0uMFgR6uAXEeBD/x8Gjtgmb3mIZtK864N5NEHpmonnG+/mzMXotc9bxNf42jw+ewWUR/TFDoAAAAABJRU5ErkJggg==', 'platform': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABGklEQVQ4T2NkoBAwwvQLzbit9e83gxgx5jGxMrx6l6F6DaQWbIDA5NsOTMwM+4nRDFPz7y+D44dc1QNgA4Sm3mxgYGSqJ9KAf0B1TAz//zW+y1ZvwDDgPwPDAcb//w5iGMbIJA2UUwFqcADLYTEgBCgsC/RU37tM1UZsrhGafrue4T9DGtCgWyBL4C4AhQGyBpDfsBmATR1mGECdhtUFyGGFKwxgfiPdACQdIL/hNABNHcILIAkmJmD4MDDgDURwDAAD+/+/J1ijkeH//+X//jHOQncBM9N/BaDpbgyMjJG4opHYhAQxGzkQBaff9QHG62YiUyJEPyOT7/tM5S2IzDTtThrD/79SRBnCyPzsXZYK2JtwA4jSiEURAG7bkBHc7fayAAAAAElFTkSuQmCC', 'doc': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAuklEQVQ4T2NkAIG+Dz0M/xmCgSwFMJ8weMDAyLCWoUighJGh/70Cwz/G+4T1YFHB9F+RkaH7vQMDE+N+LNKfGP799weLMzFuBJJ8GGr+/XfEZ8BZhmIBE7Cm3g9ngKQxqQYwAF3QCHVBPVYvEnAB4WABG0AhgIQBMyN2JxIy/O//RnyBSEg7KIzwxsKoAYRDABqI/V8lGf79fkaMegw1TKxSkITU82EqMHuGAlmiRBr0Gpj9VzOUCGQDAO0RVEruCSX5AAAAAElFTkSuQmCC', 'phone': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABj0lEQVQ4T6WTPUjDUBDH714X++Vq66SLDg4OQsFBaBHUwaGNbcHBxUHcHE2LYgVpmm6OnXWymBYcRBHaQXBQQQSXOpcObjbp5jtfoo2pqR/QBw/y7u797v/+RxAGXDjgfXABEvnXWURcBMbCQBC2GiC0gPMWEV1UssM3zqY2QMrra8hwiwBmflMlLtwTp0MtGzj6YIu1ohonRJCyLxI0CfFZJB+I+IhQNC5yY2KHujWIUD7d9qctQKLQqSFQtJvkxGPVTLDuVBJX2lGGrNaNEWC9IvtiFiCu6AuILGNJImp4iO+Us8EXJyBV1ENvhPtIOMFFQihTqpnApctE13N6DSlpsn+zr4lmMFk0ljmHM0fBk/ie+jpjQ5N9kz8CJNUoidFt2O9kLAqcck5/CNhcRfZe22Y6aVLBaIrzqBkTUjGudiLTHe/d45BxLCax+ll7IHK7LkBS1ec54ZUTSIRpZLQuVC054rcCEHEBJKWdA2R7vZ71P5nqXABzzoiePwEI/FyTA8W+Hvyn+/eagf/Gd3s+iRFlC0RJAAAAAElFTkSuQmCC', 'book': 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABUUlEQVQ4T72Tv0vDUBDHv/cSEEoXf4Do3NDB2c2lo6CIg6CL4qYp+ge4GHfX5u8QB1frLIKTWKrgYqfSVujiS3Pne9XGtiQQHDx4vFzu3ed9c5cjGJutva4R8Zl5XLf+uHV8j6w/V2sEIHX+E3uAwn7nyHsaBufD5rUAm9PJ1s8AQEBXXb+0/U0PmyY/3bIBqHd9r/I/AKNwB8IrYzUwn5BTgQhCIhwYwOWfAL+VkXuAVkd+bgVZxc0CPJpALytp+v1kFwiBAu5i4Vgx6QEoUg5pJyYdSRS5LunPSGk37keFwqJutZY0LmiQtNH221T73dyynEcBx6j0Trx6AlA8KLNyG3mS7ZkpwMuNIziNgS0h2cgDIaK3znHpcGJQhHnPDMkHQRUBKY52EZ5JnZNqORgCrC2Ez2UmZzf1dhY2/0DfLgEPd8esdrV0mwDyyE478wVNPbMRwGhoFQAAAABJRU5ErkJggg==', 'color': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABm0lEQVQ4T6WSP0vDUBTF703sKhpRRBcRU0FXBweHTu1SHKR2cVQpTSHg4ORguji4SlqR1g+gdLPgZv0M4lBTXRT/m6LgoMT3vEnT2NamFSwUHnnn/u65512Ef/6wXf2gXpmwgIcR2JB9zxHv0cIjU5VvWvVNAGnXmAIGeRLN+hg7NBU53njnAdziA7qcrgs4QAk5OwUUNhuKmiA/gIxxTKJIU2fO0mZqUpOyxhsgufpicQeGoJlJOW1rHYCkG8t0ynXJk6yjSonMke5WEAMzz4mxOxdQ1lpstmchjzELXwQRToDDipmS8w6gP3OxjoDbXV+URgJB4FSskZM9Uwkmag6yRoQ+7tNxpBPECRWh5AKKBIh6IfbmrqXA58cSA3ZVVYJFSf89lgMQIEVPfU7nraoib7RdJNtF344Rcma1F4nDE4oQos6Lte52/DxmJoMFX8BAtjzKueBtHmWk0vBRKg3T39sFX4CTTaZSoP4Lrbn0BMThx9XxB28P/IJzXaRphHlEfEfgZ5YFa6+qfFmv6eig67PWN/EvQj/NN4rukRG0fAMkAAAAAElFTkSuQmCC', 'qrcode': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAeklEQVQ4T2NkoBAwUqifAW6AwOTbDkxM/xyQDfz3j+kAiI9N/EOuKlgOboDQ1JsNDIxM9Sgu+v+vEczHIv4uW71huBlAcSDiMgAW2riim3AsMDOtQtH8918YiE9aLGCxfhAagCsQmVgZXmELwHcZqtdQUiK5mYri3AgAOhFwEYz/MPEAAAAASUVORK5CYII=', 'vdata': 
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACA0lEQVQ4T6WTT08TQRjGn5lut+zWYrulxiJt0CZoQ/RkMF44SaopJhw0wtmjn8GDFz8AF08cOPgJaoqSEAgVsEktlT2YaBREgzEKCXT7Z7u7M2430nalJSbOZTLvPO/vfd/JMwT/uUivfFVVk2fD4duxaHSWEGL10rkAm9vbQUHTphnnMyBkPBIdfE8pHQZIhoBlvEBGUZTDTpgD2NzamrKrTNuHB52XfwBJd3WSJRQZw7JeDIXD3xzAO1Ut29uZTuEOkVHyDx6MS6aSFM1uE2jnlVDgBCBPQtggQXwi/lbSOQ/DWJ+BGz4Dl9uwk4B8TcZroqAh+bBHJVfVITSA3T08uiQiMSA377oD5g4VJzHu55BC/hpnTNI/f0WxuOPE52dG/w1wXF5QS/hRNlrdnApYq/gwX464H6xYwH6dt2LPphK4GnO6bI+QLxS4Vqni5U+GXP81cNK2R+3NOqrMdoC9vDAxRj8gofRh5OKwvlYqxhzlSi5337DYk7quJzUuYKEs42N/AowKqG6so8a9uCL+wp24gPhAELIsLdWPjh6m0+kvLicuLi9ftw01W9cbNw90E4uVAL7v7iN9wcJIJADq8Sxwy3w8mUq9PZ6p61/IZrMRUZbndMNMMcZEW7QKSp5OTky8+ttRPT9TU2iDfJZHvHc3det5Nys2Y6cCeiV1xn8D3XDHnxszhaYAAAAASUVORK5CYII=', 'code': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAY0lEQVQ4T2NkoBAwUqifgVFw2u39IEMY//87CDPsXbZ6A0z8fZaqI4gNk//PyGQPUgcXF5p2+z9Y4/9/jcgGwMTfZakygtkweUameqCGAyADQOIQyVEDRsNgwNMBxXmB0twIAF0ZIjhG0cdqAAAAAElFTkSuQmCC', 'file': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAzklEQVQ4T2NkgAKhqTcbGBiZ6mF8GP2fgeEA4x8Gfwbm/zPeZatFocszwg2YdhuoFjv495fBkZGZAWz4+yxVR2RVJBkAVOwAchGyIUQZALQxDIhDgZpFQbYz/v//HOYdYg0AaUv/9/f/LbDz/7Bc+FCo+AFsGDFhgC1gYd7AZsDRd1mqNtiCU2ja7SNAcWuQHFANWC9WF4ACCpsBoECEidPUAMq9gCtBIYvj9QIo5SErZmJm2I9uKG0NoNgL5BmAIztjNez/v8Z32eoNIDkAMTpyES6bUNMAAAAASUVORK5CYII='}
@classmethod
def get_bitmap(cls, key):
    """Return a ``wx.Bitmap`` decoded from the embedded base64 PNG for *key*.

    Falls back to the ``"vdata"`` icon when *key* is not a known image name,
    so callers always receive a usable bitmap.

    :param str key: name of an entry in ``cls.images``
    :return: a ``wx.Bitmap`` built from the decoded PNG bytes
    """
    # Look the key up once instead of re-querying the dict per branch.
    data = cls.images.get(key)
    if data is None:
        # Unknown key: fall back to the generic "vdata" placeholder icon.
        data = cls.images["vdata"]
    return wx.Bitmap().FromPNGData(base64.b64decode(data))
if __name__ == '__main__':
    # Smoke test: "car" is not in the image table, so this exercises the
    # fallback path of get_bitmap.
    bitmap = ImageSource.get_bitmap("car")
    print(bitmap)
| 724.733333
| 10,451
| 0.938
| 412
| 10,871
| 24.725728
| 0.815534
| 0.086581
| 0.003534
| 0.002945
| 0.010209
| 0.010209
| 0.010209
| 0.010209
| 0.010209
| 0
| 0
| 0.127376
| 0.012786
| 10,871
| 15
| 10,452
| 724.733333
| 0.821841
| 0.00184
| 0
| 0
| 0
| 1.333333
| 0.946267
| 0.934562
| 0
| 1
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.583333
| 0.083333
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
8af9a971d56324defec0953d995fe55adc6ca21e
| 118,605
|
py
|
Python
|
openapi-python-client/openapi_client/api/external_task_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/api/external_task_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/api/external_task_api.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ExternalTaskApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def complete_external_task_resource(self, id, **kwargs): # noqa: E501
"""complete_external_task_resource # noqa: E501
Completes an external task by id and updates process variables. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.complete_external_task_resource(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the task to complete. (required)
:param CompleteExternalTaskDto complete_external_task_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.complete_external_task_resource_with_http_info(id, **kwargs) # noqa: E501
def complete_external_task_resource_with_http_info(self, id, **kwargs):  # noqa: E501
    """complete_external_task_resource  # noqa: E501

    Completes an external task by id and updates process variables.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.complete_external_task_resource_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the task to complete. (required)
    :param CompleteExternalTaskDto complete_external_task_dto:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generator idiom: snapshot the call arguments by name. The dict keys
    # must match the parameter names in the signature above.
    local_var_params = locals()

    all_params = [
        'id',
        'complete_external_task_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method complete_external_task_resource" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `complete_external_task_resource`")  # noqa: E501

    collection_formats = {}

    # Build the per-request pieces the generic ApiClient.call_api needs.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'complete_external_task_dto' in local_var_params:
        body_params = local_var_params['complete_external_task_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/{id}/complete', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def extend_lock(self, id, **kwargs): # noqa: E501
"""extend_lock # noqa: E501
Extends the timeout of the lock by a given amount of time. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.extend_lock(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task. (required)
:param ExtendLockOnExternalTaskDto extend_lock_on_external_task_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.extend_lock_with_http_info(id, **kwargs) # noqa: E501
def extend_lock_with_http_info(self, id, **kwargs):  # noqa: E501
    """extend_lock  # noqa: E501

    Extends the timeout of the lock by a given amount of time.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.extend_lock_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task. (required)
    :param ExtendLockOnExternalTaskDto extend_lock_on_external_task_dto:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generator idiom: snapshot the call arguments by name. The dict keys
    # must match the parameter names in the signature above.
    local_var_params = locals()

    all_params = [
        'id',
        'extend_lock_on_external_task_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method extend_lock" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `extend_lock`")  # noqa: E501

    collection_formats = {}

    # Build the per-request pieces the generic ApiClient.call_api needs.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'extend_lock_on_external_task_dto' in local_var_params:
        body_params = local_var_params['extend_lock_on_external_task_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/{id}/extendLock', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def fetch_and_lock(self, **kwargs): # noqa: E501
"""fetch_and_lock # noqa: E501
Fetches and locks a specific number of external tasks for execution by a worker. Query can be restricted to specific task topics and for each task topic an individual lock time can be provided. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fetch_and_lock(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param FetchExternalTasksDto fetch_external_tasks_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[LockedExternalTaskDto]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.fetch_and_lock_with_http_info(**kwargs) # noqa: E501
def fetch_and_lock_with_http_info(self, **kwargs):  # noqa: E501
    """fetch_and_lock  # noqa: E501

    Fetches and locks a specific number of external tasks for execution by a worker. Query can be restricted to specific task topics and for each task topic an individual lock time can be provided.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.fetch_and_lock_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param FetchExternalTasksDto fetch_external_tasks_dto:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[LockedExternalTaskDto], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generator idiom: snapshot the call arguments by name. The dict keys
    # must match the parameter names in the signature above.
    local_var_params = locals()

    all_params = [
        'fetch_external_tasks_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method fetch_and_lock" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # No path or query parameters for this endpoint; everything travels in
    # the request body.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'fetch_external_tasks_dto' in local_var_params:
        body_params = local_var_params['fetch_external_tasks_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/fetchAndLock', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[LockedExternalTaskDto]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_external_task(self, id, **kwargs): # noqa: E501
"""get_external_task # noqa: E501
Retrieves an external task by id, corresponding to the `ExternalTask` interface in the engine. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_external_task(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to be retrieved. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ExternalTaskDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_external_task_with_http_info(id, **kwargs) # noqa: E501
def get_external_task_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_external_task  # noqa: E501

    Retrieves an external task by id, corresponding to the `ExternalTask` interface in the engine.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_external_task_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task to be retrieved. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ExternalTaskDto, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generator idiom: snapshot the call arguments by name. The dict keys
    # must match the parameter names in the signature above.
    local_var_params = locals()

    all_params = [
        'id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_external_task" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_external_task`")  # noqa: E501

    collection_formats = {}

    # Build the per-request pieces the generic ApiClient.call_api needs.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body, so no Content-Type header is selected.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ExternalTaskDto',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_external_task_error_details(self, id, **kwargs): # noqa: E501
"""get_external_task_error_details # noqa: E501
Retrieves the error details in the context of a running external task by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_external_task_error_details(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task for which the error details should be retrieved. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_external_task_error_details_with_http_info(id, **kwargs) # noqa: E501
def get_external_task_error_details_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_external_task_error_details  # noqa: E501

    Retrieves the error details in the context of a running external task by id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_external_task_error_details_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task for which the error details should be retrieved. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generator idiom: snapshot the call arguments by name. The dict keys
    # must match the parameter names in the signature above.
    local_var_params = locals()

    all_params = [
        'id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_external_task_error_details" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_external_task_error_details`")  # noqa: E501

    collection_formats = {}

    # Build the per-request pieces the generic ApiClient.call_api needs.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body; the error details come back as plain text,
    # hence text/plain is accepted in addition to JSON.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/{id}/errorDetails', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_external_tasks(self, **kwargs): # noqa: E501
"""get_external_tasks # noqa: E501
Queries for the external tasks that fulfill given parameters. Parameters may be static as well as dynamic runtime properties of executions. The size of the result set can be retrieved by using the [Get External Task Count](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query-count/) method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_external_tasks(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str external_task_id: Filter by an external task's id.
:param str external_task_id_in: Filter by the comma-separated list of external task ids.
:param str topic_name: Filter by an external task topic.
:param str worker_id: Filter by the id of the worker that the task was most recently locked by.
:param bool locked: Only include external tasks that are currently locked (i.e., they have a lock time and it has not expired). Value may only be `true`, as `false` matches any external task.
:param bool not_locked: Only include external tasks that are currently not locked (i.e., they have no lock or it has expired). Value may only be `true`, as `false` matches any external task.
:param bool with_retries_left: Only include external tasks that have a positive (> 0) number of retries (or `null`). Value may only be `true`, as `false` matches any external task.
:param bool no_retries_left: Only include external tasks that have 0 retries. Value may only be `true`, as `false` matches any external task.
:param datetime lock_expiration_after: Restrict to external tasks that have a lock that expires after a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`.
:param datetime lock_expiration_before: Restrict to external tasks that have a lock that expires before a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`.
:param str activity_id: Filter by the id of the activity that an external task is created for.
:param str activity_id_in: Filter by the comma-separated list of ids of the activities that an external task is created for.
:param str execution_id: Filter by the id of the execution that an external task belongs to.
:param str process_instance_id: Filter by the id of the process instance that an external task belongs to.
:param str process_instance_id_in: Filter by a comma-separated list of process instance ids that an external task may belong to.
:param str process_definition_id: Filter by the id of the process definition that an external task belongs to.
:param str tenant_id_in: Filter by a comma-separated list of tenant ids. An external task must have one of the given tenant ids.
:param bool active: Only include active tasks. Value may only be `true`, as `false` matches any external task.
:param bool suspended: Only include suspended tasks. Value may only be `true`, as `false` matches any external task.
:param int priority_higher_than_or_equals: Only include jobs with a priority higher than or equal to the given value. Value must be a valid `long` value.
:param int priority_lower_than_or_equals: Only include jobs with a priority lower than or equal to the given value. Value must be a valid `long` value.
:param str sort_by: Sort the results lexicographically by a given criterion. Must be used in conjunction with the sortOrder parameter.
:param str sort_order: Sort the results in a given order. Values may be asc for ascending order or desc for descending order. Must be used in conjunction with the sortBy parameter.
:param int first_result: Pagination of results. Specifies the index of the first result to return.
:param int max_results: Pagination of results. Specifies the maximum number of results to return. Will return less results if there are no more results left.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[ExternalTaskDto]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_external_tasks_with_http_info(**kwargs) # noqa: E501
def get_external_tasks_with_http_info(self, **kwargs):  # noqa: E501
    """get_external_tasks  # noqa: E501

    Queries for the external tasks that fulfill given parameters and returns
    the full HTTP response information alongside the deserialized body. The
    size of the result set can be retrieved with the
    [Get External Task Count](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query-count/)
    method.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_external_tasks_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str external_task_id: Filter by an external task's id.
    :param str external_task_id_in: Comma-separated list of external task ids.
    :param str topic_name: Filter by an external task topic.
    :param str worker_id: Filter by the id of the worker that most recently locked the task.
    :param bool locked: Only tasks with an unexpired lock; only `true` is meaningful.
    :param bool not_locked: Only tasks without an active lock; only `true` is meaningful.
    :param bool with_retries_left: Only tasks with retries > 0 or retries `null`; only `true` is meaningful.
    :param bool no_retries_left: Only tasks with 0 retries; only `true` is meaningful.
    :param datetime lock_expiration_after: Only tasks whose lock expires after this date (see the REST date format docs).
    :param datetime lock_expiration_before: Only tasks whose lock expires before this date.
    :param str activity_id: Filter by the id of the activity the task was created for.
    :param str activity_id_in: Comma-separated list of such activity ids.
    :param str execution_id: Filter by the owning execution id.
    :param str process_instance_id: Filter by the owning process instance id.
    :param str process_instance_id_in: Comma-separated list of process instance ids.
    :param str process_definition_id: Filter by the owning process definition id.
    :param str tenant_id_in: Comma-separated list of tenant ids; a task must have one of them.
    :param bool active: Only active tasks; only `true` is meaningful.
    :param bool suspended: Only suspended tasks; only `true` is meaningful.
    :param int priority_higher_than_or_equals: Minimum task priority (valid `long`).
    :param int priority_lower_than_or_equals: Maximum task priority (valid `long`).
    :param str sort_by: Sort criterion; must be combined with sort_order.
    :param str sort_order: `asc` or `desc`; must be combined with sort_by.
    :param int first_result: Pagination: index of the first result to return.
    :param int max_results: Pagination: maximum number of results to return.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[ExternalTaskDto], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ordered (python_name, query_name) pairs; the list order fixes the
    # order in which query parameters are emitted, matching the REST API.
    query_name_pairs = [
        ('external_task_id', 'externalTaskId'),
        ('external_task_id_in', 'externalTaskIdIn'),
        ('topic_name', 'topicName'),
        ('worker_id', 'workerId'),
        ('locked', 'locked'),
        ('not_locked', 'notLocked'),
        ('with_retries_left', 'withRetriesLeft'),
        ('no_retries_left', 'noRetriesLeft'),
        ('lock_expiration_after', 'lockExpirationAfter'),
        ('lock_expiration_before', 'lockExpirationBefore'),
        ('activity_id', 'activityId'),
        ('activity_id_in', 'activityIdIn'),
        ('execution_id', 'executionId'),
        ('process_instance_id', 'processInstanceId'),
        ('process_instance_id_in', 'processInstanceIdIn'),
        ('process_definition_id', 'processDefinitionId'),
        ('tenant_id_in', 'tenantIdIn'),
        ('active', 'active'),
        ('suspended', 'suspended'),
        ('priority_higher_than_or_equals', 'priorityHigherThanOrEquals'),
        ('priority_lower_than_or_equals', 'priorityLowerThanOrEquals'),
        ('sort_by', 'sortBy'),
        ('sort_order', 'sortOrder'),
        ('first_result', 'firstResult'),
        ('max_results', 'maxResults'),
    ]
    allowed = set(name for name, _ in query_name_pairs)
    allowed.update([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    # Reject unknown keyword arguments up front, before issuing the request.
    for key in kwargs:
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_external_tasks" % key
            )

    # Only parameters that were supplied with a non-None value are sent.
    query_params = [
        (query_name, kwargs[name])
        for name, query_name in query_name_pairs
        if kwargs.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    # No authentication required for this endpoint.
    return self.api_client.call_api(
        '/external-task', 'GET',
        {},  # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[ExternalTaskDto]',  # noqa: E501
        auth_settings=[],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_external_tasks_count(self, **kwargs):  # noqa: E501
    """get_external_tasks_count  # noqa: E501

    Queries for the number of external tasks that fulfill given parameters.
    Takes the same parameters as the
    [Get External Tasks](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query/)
    method.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_external_tasks_count(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str external_task_id: Filter by an external task's id.
    :param str external_task_id_in: Comma-separated list of external task ids.
    :param str topic_name: Filter by an external task topic.
    :param str worker_id: Filter by the id of the worker that most recently locked the task.
    :param bool locked: Only tasks with an unexpired lock; only `true` is meaningful.
    :param bool not_locked: Only tasks without an active lock; only `true` is meaningful.
    :param bool with_retries_left: Only tasks with retries > 0 or retries `null`; only `true` is meaningful.
    :param bool no_retries_left: Only tasks with 0 retries; only `true` is meaningful.
    :param datetime lock_expiration_after: Only tasks whose lock expires after this date (see the REST date format docs).
    :param datetime lock_expiration_before: Only tasks whose lock expires before this date.
    :param str activity_id: Filter by the id of the activity the task was created for.
    :param str activity_id_in: Comma-separated list of such activity ids.
    :param str execution_id: Filter by the owning execution id.
    :param str process_instance_id: Filter by the owning process instance id.
    :param str process_instance_id_in: Comma-separated list of process instance ids.
    :param str process_definition_id: Filter by the owning process definition id.
    :param str tenant_id_in: Comma-separated list of tenant ids; a task must have one of them.
    :param bool active: Only active tasks; only `true` is meaningful.
    :param bool suspended: Only suspended tasks; only `true` is meaningful.
    :param int priority_higher_than_or_equals: Minimum task priority (valid `long`).
    :param int priority_lower_than_or_equals: Maximum task priority (valid `long`).
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: CountResultDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force body-only return; status code and headers are dropped by the
    # *_with_http_info helper when this flag is set.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.get_external_tasks_count_with_http_info(**call_kwargs)  # noqa: E501
def get_external_tasks_count_with_http_info(self, **kwargs):  # noqa: E501
    """get_external_tasks_count  # noqa: E501

    Queries for the number of external tasks that fulfill given parameters
    and returns the full HTTP response information alongside the count.
    Takes the same parameters as the
    [Get External Tasks](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query/)
    method.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_external_tasks_count_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str external_task_id: Filter by an external task's id.
    :param str external_task_id_in: Comma-separated list of external task ids.
    :param str topic_name: Filter by an external task topic.
    :param str worker_id: Filter by the id of the worker that most recently locked the task.
    :param bool locked: Only tasks with an unexpired lock; only `true` is meaningful.
    :param bool not_locked: Only tasks without an active lock; only `true` is meaningful.
    :param bool with_retries_left: Only tasks with retries > 0 or retries `null`; only `true` is meaningful.
    :param bool no_retries_left: Only tasks with 0 retries; only `true` is meaningful.
    :param datetime lock_expiration_after: Only tasks whose lock expires after this date (see the REST date format docs).
    :param datetime lock_expiration_before: Only tasks whose lock expires before this date.
    :param str activity_id: Filter by the id of the activity the task was created for.
    :param str activity_id_in: Comma-separated list of such activity ids.
    :param str execution_id: Filter by the owning execution id.
    :param str process_instance_id: Filter by the owning process instance id.
    :param str process_instance_id_in: Comma-separated list of process instance ids.
    :param str process_definition_id: Filter by the owning process definition id.
    :param str tenant_id_in: Comma-separated list of tenant ids; a task must have one of them.
    :param bool active: Only active tasks; only `true` is meaningful.
    :param bool suspended: Only suspended tasks; only `true` is meaningful.
    :param int priority_higher_than_or_equals: Minimum task priority (valid `long`).
    :param int priority_lower_than_or_equals: Maximum task priority (valid `long`).
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(CountResultDto, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ordered (python_name, query_name) pairs; the list order fixes the
    # order in which query parameters are emitted, matching the REST API.
    query_name_pairs = [
        ('external_task_id', 'externalTaskId'),
        ('external_task_id_in', 'externalTaskIdIn'),
        ('topic_name', 'topicName'),
        ('worker_id', 'workerId'),
        ('locked', 'locked'),
        ('not_locked', 'notLocked'),
        ('with_retries_left', 'withRetriesLeft'),
        ('no_retries_left', 'noRetriesLeft'),
        ('lock_expiration_after', 'lockExpirationAfter'),
        ('lock_expiration_before', 'lockExpirationBefore'),
        ('activity_id', 'activityId'),
        ('activity_id_in', 'activityIdIn'),
        ('execution_id', 'executionId'),
        ('process_instance_id', 'processInstanceId'),
        ('process_instance_id_in', 'processInstanceIdIn'),
        ('process_definition_id', 'processDefinitionId'),
        ('tenant_id_in', 'tenantIdIn'),
        ('active', 'active'),
        ('suspended', 'suspended'),
        ('priority_higher_than_or_equals', 'priorityHigherThanOrEquals'),
        ('priority_lower_than_or_equals', 'priorityLowerThanOrEquals'),
    ]
    allowed = set(name for name, _ in query_name_pairs)
    allowed.update([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    # Reject unknown keyword arguments up front, before issuing the request.
    for key in kwargs:
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_external_tasks_count" % key
            )

    # Only parameters that were supplied with a non-None value are sent.
    query_params = [
        (query_name, kwargs[name])
        for name, query_name in query_name_pairs
        if kwargs.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    # No authentication required for this endpoint.
    return self.api_client.call_api(
        '/external-task/count', 'GET',
        {},  # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CountResultDto',  # noqa: E501
        auth_settings=[],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_topic_names(self, **kwargs):  # noqa: E501
    """get_topic_names  # noqa: E501

    Queries for distinct topic names of external tasks that fulfill given
    parameters. The query can be restricted to tasks with retries left,
    locked tasks, or unlocked tasks. The parameters withLockedTasks and
    withUnlockedTasks are exclusive: setting both to true returns an empty
    list. With no parameters, all distinct topic names with external tasks
    are returned.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_topic_names(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool with_locked_tasks: Only tasks with an unexpired lock; only `true` is meaningful.
    :param bool with_unlocked_tasks: Only tasks without an active lock; only `true` is meaningful.
    :param bool with_retries_left: Only tasks with retries > 0 or retries `null`; only `true` is meaningful.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[str]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force body-only return; status code and headers are dropped by the
    # *_with_http_info helper when this flag is set.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.get_topic_names_with_http_info(**call_kwargs)  # noqa: E501
def get_topic_names_with_http_info(self, **kwargs):  # noqa: E501
    """get_topic_names  # noqa: E501

    Queries for distinct topic names of external tasks that fulfill given
    parameters, returning the full HTTP response information alongside the
    names. The parameters withLockedTasks and withUnlockedTasks are
    exclusive: setting both to true returns an empty list. With no
    parameters, all distinct topic names with external tasks are
    returned.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_topic_names_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool with_locked_tasks: Only tasks with an unexpired lock; only `true` is meaningful.
    :param bool with_unlocked_tasks: Only tasks without an active lock; only `true` is meaningful.
    :param bool with_retries_left: Only tasks with retries > 0 or retries `null`; only `true` is meaningful.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[str], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ordered (python_name, query_name) pairs; the list order fixes the
    # order in which query parameters are emitted.
    query_name_pairs = [
        ('with_locked_tasks', 'withLockedTasks'),
        ('with_unlocked_tasks', 'withUnlockedTasks'),
        ('with_retries_left', 'withRetriesLeft'),
    ]
    allowed = set(name for name, _ in query_name_pairs)
    allowed.update([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    # Reject unknown keyword arguments up front, before issuing the request.
    for key in kwargs:
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_topic_names" % key
            )

    # Only parameters that were supplied with a non-None value are sent.
    query_params = [
        (query_name, kwargs[name])
        for name, query_name in query_name_pairs
        if kwargs.get(name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    # No authentication required for this endpoint.
    return self.api_client.call_api(
        '/external-task/topic-names', 'GET',
        {},  # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[str]',  # noqa: E501
        auth_settings=[],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def handle_external_task_bpmn_error(self, id, **kwargs):  # noqa: E501
    """handle_external_task_bpmn_error  # noqa: E501

    Reports a business error in the context of a running external task by
    id. The error code must be specified to identify the BPMN error
    handler.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.handle_external_task_bpmn_error(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task in which context a BPMN error is reported. (required)
    :param ExternalTaskBpmnError external_task_bpmn_error:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force body-only return; status code and headers are dropped by the
    # *_with_http_info helper when this flag is set.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.handle_external_task_bpmn_error_with_http_info(id, **call_kwargs)  # noqa: E501
def handle_external_task_bpmn_error_with_http_info(self, id, **kwargs): # noqa: E501
"""handle_external_task_bpmn_error # noqa: E501
Reports a business error in the context of a running external task by id. The error code must be specified to identify the BPMN error handler. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_external_task_bpmn_error_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task in which context a BPMN error is reported. (required)
:param ExternalTaskBpmnError external_task_bpmn_error:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'external_task_bpmn_error'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method handle_external_task_bpmn_error" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `handle_external_task_bpmn_error`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'external_task_bpmn_error' in local_var_params:
body_params = local_var_params['external_task_bpmn_error']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/{id}/bpmnError', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def handle_failure(self, id, **kwargs): # noqa: E501
"""handle_failure # noqa: E501
Reports a failure to execute an external task by id. A number of retries and a timeout until the task can be retried can be specified. If retries are set to 0, an incident for this task is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_failure(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to report a failure for. (required)
:param ExternalTaskFailureDto external_task_failure_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.handle_failure_with_http_info(id, **kwargs) # noqa: E501
def handle_failure_with_http_info(self, id, **kwargs): # noqa: E501
"""handle_failure # noqa: E501
Reports a failure to execute an external task by id. A number of retries and a timeout until the task can be retried can be specified. If retries are set to 0, an incident for this task is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.handle_failure_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to report a failure for. (required)
:param ExternalTaskFailureDto external_task_failure_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'external_task_failure_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method handle_failure" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `handle_failure`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'external_task_failure_dto' in local_var_params:
body_params = local_var_params['external_task_failure_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/{id}/failure', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def query_external_tasks(self, **kwargs): # noqa: E501
"""query_external_tasks # noqa: E501
Queries for external tasks that fulfill given parameters in the form of a JSON object. This method is slightly more powerful than the [Get External Tasks](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query/) method because it allows to specify a hierarchical result sorting. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_external_tasks(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int first_result: Pagination of results. Specifies the index of the first result to return.
:param int max_results: Pagination of results. Specifies the maximum number of results to return. Will return less results if there are no more results left.
:param ExternalTaskQueryDto external_task_query_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[ExternalTaskDto]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.query_external_tasks_with_http_info(**kwargs) # noqa: E501
def query_external_tasks_with_http_info(self, **kwargs): # noqa: E501
"""query_external_tasks # noqa: E501
Queries for external tasks that fulfill given parameters in the form of a JSON object. This method is slightly more powerful than the [Get External Tasks](https://docs.camunda.org/manual/7.13/reference/rest/external-task/get-query/) method because it allows to specify a hierarchical result sorting. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_external_tasks_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int first_result: Pagination of results. Specifies the index of the first result to return.
:param int max_results: Pagination of results. Specifies the maximum number of results to return. Will return less results if there are no more results left.
:param ExternalTaskQueryDto external_task_query_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[ExternalTaskDto], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'first_result',
'max_results',
'external_task_query_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method query_external_tasks" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'first_result' in local_var_params and local_var_params['first_result'] is not None: # noqa: E501
query_params.append(('firstResult', local_var_params['first_result'])) # noqa: E501
if 'max_results' in local_var_params and local_var_params['max_results'] is not None: # noqa: E501
query_params.append(('maxResults', local_var_params['max_results'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'external_task_query_dto' in local_var_params:
body_params = local_var_params['external_task_query_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExternalTaskDto]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def query_external_tasks_count(self, **kwargs): # noqa: E501
"""query_external_tasks_count # noqa: E501
Queries for the number of external tasks that fulfill given parameters. This method takes the same message body as the [Get External Tasks (POST)](https://docs.camunda.org/manual/7.13/reference/rest/external-task/post-query/) method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_external_tasks_count(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param ExternalTaskQueryDto external_task_query_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CountResultDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.query_external_tasks_count_with_http_info(**kwargs) # noqa: E501
def query_external_tasks_count_with_http_info(self, **kwargs): # noqa: E501
"""query_external_tasks_count # noqa: E501
Queries for the number of external tasks that fulfill given parameters. This method takes the same message body as the [Get External Tasks (POST)](https://docs.camunda.org/manual/7.13/reference/rest/external-task/post-query/) method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_external_tasks_count_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param ExternalTaskQueryDto external_task_query_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CountResultDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'external_task_query_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method query_external_tasks_count" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'external_task_query_dto' in local_var_params:
body_params = local_var_params['external_task_query_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CountResultDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def set_external_task_resource_priority(self, id, **kwargs): # noqa: E501
"""set_external_task_resource_priority # noqa: E501
Sets the priority of an existing external task by id. The default value of a priority is 0. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_resource_priority(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to set the priority for. (required)
:param PriorityDto priority_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.set_external_task_resource_priority_with_http_info(id, **kwargs) # noqa: E501
def set_external_task_resource_priority_with_http_info(self, id, **kwargs): # noqa: E501
"""set_external_task_resource_priority # noqa: E501
Sets the priority of an existing external task by id. The default value of a priority is 0. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_resource_priority_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to set the priority for. (required)
:param PriorityDto priority_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'priority_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method set_external_task_resource_priority" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `set_external_task_resource_priority`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'priority_dto' in local_var_params:
body_params = local_var_params['priority_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/{id}/priority', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def set_external_task_resource_retries(self, id, **kwargs): # noqa: E501
"""set_external_task_resource_retries # noqa: E501
Sets the number of retries left to execute an external task by id. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_resource_retries(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to set the number of retries for. (required)
:param RetriesDto retries_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.set_external_task_resource_retries_with_http_info(id, **kwargs) # noqa: E501
def set_external_task_resource_retries_with_http_info(self, id, **kwargs): # noqa: E501
"""set_external_task_resource_retries # noqa: E501
Sets the number of retries left to execute an external task by id. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_resource_retries_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the external task to set the number of retries for. (required)
:param RetriesDto retries_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'retries_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method set_external_task_resource_retries" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `set_external_task_resource_retries`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'retries_dto' in local_var_params:
body_params = local_var_params['retries_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/{id}/retries', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def set_external_task_retries(self, **kwargs): # noqa: E501
"""set_external_task_retries # noqa: E501
Sets the number of retries left to execute external tasks by id synchronously. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_retries(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SetRetriesForExternalTasksDto set_retries_for_external_tasks_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.set_external_task_retries_with_http_info(**kwargs) # noqa: E501
def set_external_task_retries_with_http_info(self, **kwargs): # noqa: E501
"""set_external_task_retries # noqa: E501
Sets the number of retries left to execute external tasks by id synchronously. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_retries_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SetRetriesForExternalTasksDto set_retries_for_external_tasks_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'set_retries_for_external_tasks_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method set_external_task_retries" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'set_retries_for_external_tasks_dto' in local_var_params:
body_params = local_var_params['set_retries_for_external_tasks_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/retries', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def set_external_task_retries_async_operation(self, **kwargs): # noqa: E501
"""set_external_task_retries_async_operation # noqa: E501
Sets the number of retries left to execute external tasks by id asynchronously. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_retries_async_operation(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SetRetriesForExternalTasksDto set_retries_for_external_tasks_dto:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: BatchDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.set_external_task_retries_async_operation_with_http_info(**kwargs) # noqa: E501
def set_external_task_retries_async_operation_with_http_info(self, **kwargs): # noqa: E501
"""set_external_task_retries_async_operation # noqa: E501
Sets the number of retries left to execute external tasks by id asynchronously. If retries are set to 0, an incident is created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_external_task_retries_async_operation_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SetRetriesForExternalTasksDto set_retries_for_external_tasks_dto:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(BatchDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'set_retries_for_external_tasks_dto'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method set_external_task_retries_async_operation" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'set_retries_for_external_tasks_dto' in local_var_params:
body_params = local_var_params['set_retries_for_external_tasks_dto']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/external-task/retries-async', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BatchDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def unlock(self, id, **kwargs):  # noqa: E501
    """unlock  # noqa: E501

    Unlocks an external task by id. Clears the task's lock expiration time and worker id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.unlock(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task to unlock. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized payload (no status code / headers tuple).
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.unlock_with_http_info(id, **call_kwargs)  # noqa: E501
def unlock_with_http_info(self, id, **kwargs):  # noqa: E501
    """unlock  # noqa: E501

    Unlocks an external task by id. Clears the task's lock expiration time and worker id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.unlock_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: The id of the external task to unlock. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Must be the first statement: snapshot the parameters ('self', 'id',
    # 'kwargs') before any other local is created.
    local_var_params = locals()

    # Named parameters plus the framework-level options every endpoint accepts.
    all_params = ['id'] + [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Merge recognised kwargs into the snapshot; reject anything else.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method unlock" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `unlock`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/external-task/{id}/unlock', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| 52.526572
| 424
| 0.626213
| 14,519
| 118,605
| 4.877195
| 0.027757
| 0.043044
| 0.068604
| 0.021607
| 0.983237
| 0.980851
| 0.977137
| 0.975145
| 0.970669
| 0.966065
| 0
| 0.016016
| 0.301404
| 118,605
| 2,257
| 425
| 52.549845
| 0.838614
| 0.504616
| 0
| 0.783685
| 1
| 0
| 0.204043
| 0.076101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032081
| false
| 0
| 0.004583
| 0
| 0.068744
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c10c5c8d35d6f9ddb221d59c10828ea892946d95
| 972
|
py
|
Python
|
img.py
|
shabidkhan/hamgman
|
e032d266ed4fb73e2acc7ce4bad1d8adb846ae99
|
[
"MIT"
] | 1
|
2020-02-19T13:14:17.000Z
|
2020-02-19T13:14:17.000Z
|
img.py
|
shabidkhan/hamgman
|
e032d266ed4fb73e2acc7ce4bad1d8adb846ae99
|
[
"MIT"
] | null | null | null |
img.py
|
shabidkhan/hamgman
|
e032d266ed4fb73e2acc7ce4bad1d8adb846ae99
|
[
"MIT"
] | 1
|
2020-10-29T19:01:58.000Z
|
2020-10-29T19:01:58.000Z
|
def Show_Image(chance):
    """Return the hangman ASCII drawing for the given stage.

    The drawing at index ``chance + 1`` is returned, so the valid range
    for ``chance`` is -1 through 6 (8 frames in total, from the freed
    figure up to the fully hanged one).

    :param chance: remaining-chances counter; ``chance + 1`` indexes the
        frame list.
    :return: str, the ASCII drawing for that stage.
    :raises IndexError: if ``chance + 1`` falls outside 0..7.
    """
    # Raw strings are essential here: in a plain triple-quoted string a
    # backslash at the end of a line (as in the legs "/ \") is consumed as
    # a line continuation, and sequences like "\O" are invalid escapes
    # (W605).  r'''...''' keeps every backslash literal.
    image = [
        r'''
 \O/
  |
 / \
XXX ''',
        r'''
  O
 /|\
 / \
XXX ''',
        r'''
        |
  O     |
 /|\    |
 / \    |
XXX =========''',
        r'''
+------------+
        |
  O     |
 /|\    |
 / \    |
XXX =========''',
        r'''
+-------------+
 |      |
  O     |
 /|\    |
 / \    |
XXX =========''',
        r'''
+-------------+
 |      |
 (O)    |
 /|\    |
 / \    |
XXX =========''',
        r'''
+-------------+
 |      |
\(O)/   |
  |     |
 / \    |
    =========''',
        # Final frame: gallows base normalized to the same width
        # (=========) as the other frames.
        r'''
+-------------+
 |      |
 (O)    |
 |||    |
 / \    |
    =========''',
    ]
    return image[chance + 1]
| 18.339623
| 99
| 0.069959
| 23
| 972
| 2.913043
| 0.347826
| 0.358209
| 0.447761
| 0.597015
| 0.373134
| 0.373134
| 0.373134
| 0.373134
| 0.373134
| 0.373134
| 0
| 0.003003
| 0.657407
| 972
| 52
| 100
| 18.692308
| 0.198198
| 0
| 0
| 0.680851
| 0
| 0
| 0.793814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0
| 0
| 0.042553
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c14401724d7c008e7f13b49bb431e7e8838d7834
| 110,983
|
py
|
Python
|
tests/test_format_title.py
|
SaMeHub/cds_paper_bot
|
6ea07b7295b3f493d9e8d605fd1b09b0fe283441
|
[
"MIT"
] | 2
|
2018-03-20T15:59:34.000Z
|
2018-03-21T00:18:39.000Z
|
tests/test_format_title.py
|
SaMeHub/cds_paper_bot
|
6ea07b7295b3f493d9e8d605fd1b09b0fe283441
|
[
"MIT"
] | 30
|
2018-01-12T11:36:12.000Z
|
2021-09-04T07:22:15.000Z
|
tests/test_format_title.py
|
SaMeHub/cds_paper_bot
|
6ea07b7295b3f493d9e8d605fd1b09b0fe283441
|
[
"MIT"
] | 1
|
2019-07-09T06:45:34.000Z
|
2019-07-09T06:45:34.000Z
|
"""Test title formatting."""
import sys
import os
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import cds_paper_bot # pylint: disable=wrong-import-position,import-error
class TestFormatTitle(object):
"""List of titles and what they should look like after formatting."""
@pytest.mark.parametrize(
"input_title, expected",
[
("Analysis at $\\sqrt s=13 TeV$", "Analysis at √(s) = 13 TeV"),
("\\sqrt s", "√(s)"),
("Analysis of process $x \\rightarrowy$", "Analysis of process x → y"),
("$x__s$", "x_s"),
("x →y", "x → y"),
("$t\\overline tt$", "ttt"),
("$t\\bar{t}$", "tt̅"),
("$t \\bar{t}$", "tt̅"),
("$t \\overline t$", "tt"),
("\\overline xy", "xy"),
("Bethe--Bloch", "Bethe–Bloch"),
("Bethe---Bloch", "Bethe—Bloch"),
("Energies of 15keV and MeV, 6eV", "Energies of 15 keV and MeV, 6 eV"),
("13TeV", "13 TeV"),
("nonsenseTeV", "nonsenseTeV"),
("13tev", "13tev"),
("50eV", "50 eV"),
# pylint: disable=line-too-long,too-many-lines
# CMS cms_pas_feed
(
"Measurement of differential ${\\mathrm t}\\bar{\\mathrm t}$ production cross sections for high-$p_{\\text{T}}$ top quarks in proton-proton collisions at $\\sqrt{s} = 13\\,\\text{TeV}$",
"Measurement of differential tt̅ production cross sections for high-p_T top quarks in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for long-lived particles decaying into displaced jets",
"Search for long-lived particles decaying into displaced jets",
),
(
"Study of hard color singlet exchange in dijet events with proton-proton collisions at $\\sqrt{s}= 13~\\mathrm{TeV}$",
"Study of hard color singlet exchange in dijet events with proton-proton collisions at √(s) = 13 TeV",
),
(
"Inclusive search for a highly boosted Higgs boson decaying to a bottom quark-antiquark pair at $\\sqrt{s} = 13~\\mathrm{TeV}$ with $137~\\mathrm{fb}^{-1}$",
"Inclusive search for a highly boosted Higgs boson decaying to a bottom quark-antiquark pair at √(s) = 13 TeV with 137 fb⁻¹",
),
(
"Observation of heavy triboson production in leptonic final states in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Observation of heavy triboson production in leptonic final states in proton-proton collisions at √(s) = 13 TeV",
),
(
"Studies of $\\mathrm{W^+W^-}$ production at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Studies of W⁺W⁻ production at √(s) = 13 TeV",
),
(
"Measurement of the $CP$ violating phase $\\phi_{\\text{s}}$ in the $\\mathrm{B}_s \\to \\mathrm{J}/\\psi\\,\\phi(1020) \\to \\mu^+\\mu^-\\,\\mathrm{K}^+\\mathrm{K}^-$ channel in proton-proton collisions at $\\sqrt{s} = 13~\\mathrm{TeV}$",
"Measurement of the CP violating phase ϕ_s in the B_s → J/ψ ϕ(1020) → μ⁺μ⁻ K⁺K⁻ channel in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurements of production cross sections of same-sign WW and WZ boson pairs in association with two jets in proton-proton collisions at sqrts = 13 TeV",
"Measurements of production cross sections of same-sign WW and WZ boson pairs in association with two jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of differential cross sections for single top quark production in association with a W boson at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of differential cross sections for single top quark production in association with a W boson at √(s) = 13 TeV",
),
(
"Measurement of the W boson rapidity, helicity, and differential cross sections in pp collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of the W boson rapidity, helicity, and differential cross sections in pp collisions at √(s) = 13 TeV",
),
(
"Search for disappearing tracks in proton-proton collisions at $\\sqrt{s} = 13$ TeV",
"Search for disappearing tracks in proton-proton collisions at √(s) = 13 TeV",
),
(
"Combined Higgs boson production and decay measurements with up to 137 fb-1 of proton-proton collision data at sqrts = 13 TeV",
"Combined Higgs boson production and decay measurements with up to 137 fb-1 of proton-proton collision data at √(s) = 13 TeV",
),
(
"Search for a light charged Higgs boson in the H$^{\\pm} \\rightarrow$ cs channel at 13 TeV",
"Search for a light charged Higgs boson in the H^± → cs channel at 13 TeV",
),
(
"Measurement of prompt $\\rm{ D_{s}^{+}}$ production in pp and PbPb collisions at $\\sqrt{s_{_{\\text{NN}}}}$ = 5.02 TeV",
"Measurement of prompt D⁺_s production in pp and PbPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Extraction of CKM matrix elements in single top quark $t$-channel events in proton-proton collisions at $\\sqrt{s} = 13$ TeV",
"Extraction of CKM matrix elements in single top quark t-channel events in proton-proton collisions at √(s) = 13 TeV",
),
(
"Nuclear modification factor of isolated photons in PbPb collisions at $\\sqrt{s_{_{\\mathrm{NN}}}} = 5.02~\\mathrm{TeV}$",
"Nuclear modification factor of isolated photons in PbPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Nuclear modification of $\\Upsilon$ states in pPb collisions at $\\sqrt{s_\\mathrm{NN}} = 5.02~\\mathrm{TeV}$",
"Nuclear modification of Υ states in pPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Search for strong electromagnetic fields in PbPb collisions at 5.02 TeV via azimuthal anisotropy of $\\mathrm{D^0}$ and $\\mathrm{\\overline{D}^0}$ mesons",
"Search for strong electromagnetic fields in PbPb collisions at 5.02 TeV via azimuthal anisotropy of D⁰ and D̅⁰ mesons",
),
(
"Studies of charm and beauty long-range correlations in pp and pPb collisions",
"Studies of charm and beauty long-range correlations in pp and pPb collisions",
),
(
"Evidence for top quark production in nucleus-nucleus collisions",
"Evidence for top quark production in nucleus-nucleus collisions",
),
(
"Evidence for $\\chi_{c1}$(3872) in PbPb collisions and studies of its prompt production at $\\sqrt{\\smash[b]{s_{_{\\mathrm{NN}}}}}=5.02$ TeV",
"Evidence for χ_c1(3872) in PbPb collisions and studies of its prompt production at √(s_NN) = 5.02 TeV",
),
(
"Study of quark- and gluon-like jet fractions using jet charge in PbPb and pp collisions at 5.02 TeV",
"Study of quark- and gluon-like jet fractions using jet charge in PbPb and pp collisions at 5.02 TeV",
),
(
"Measurement of the elliptic flow of $\\Upsilon\\textrm{(1S)}$ and $\\Upsilon\\textrm{(2S)}$ mesons in PbPb collisions at $\\sqrt{\\mathrm{s_{NN}}}=5.02~\\mathrm{TeV}$",
"Measurement of the elliptic flow of Υ(1S) and Υ(2S) mesons in PbPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurement of Jet Nuclear Modification Factor in PbPb Collisions at $\\sqrt{s_{NN}}$ = 5.02 TeV with CMS",
"Measurement of Jet Nuclear Modification Factor in PbPb Collisions at √(s_NN) = 5.02 TeV with CMS",
),
(
"Measurement of $\\mathrm{b}$ jet shapes in pp collisions at $\\sqrt{s} = 5.02~\\mathrm{TeV}$",
"Measurement of b jet shapes in pp collisions at √(s) = 5.02 TeV",
),
(
"Measurement of the average very forward energy as a function of the track multiplicity at central rapidities in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of the average very forward energy as a function of the track multiplicity at central rapidities in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for direct $\\tau$ slepton pair production in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for direct τ slepton pair production in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for heavy resonances in the all-hadronic vector-boson pair final state with a multi-dimensional fit",
"Search for heavy resonances in the all-hadronic vector-boson pair final state with a multi-dimensional fit",
),
(
"Study of the $\\mathrm{B}^{+}\\rightarrow \\mathrm{J}/\\psi \\bar{\\Lambda} \\mathrm{p}$ decay in proton-proton collisions at $\\sqrt{s}= 8~\\mathrm{TeV}$",
"Study of the B⁺ → J/ψΛ̅p decay in proton-proton collisions at √(s) = 8 TeV",
),
(
"Search for new physics in multilepton final states in pp collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for new physics in multilepton final states in pp collisions at √(s) = 13 TeV",
),
(
"Measurement of the $\\textrm{pp} \\rightarrow \\textrm{Z}\\textrm{Z}$ production cross section at $\\sqrt{s} = 13$ TeV with the Run 2 data set",
"Measurement of the pp → ZZ production cross section at √(s) = 13 TeV with the Run 2 data set",
),
(
"Search for standard model production of four top quarks in final states with same-sign and multiple leptons in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for standard model production of four top quarks in final states with same-sign and multiple leptons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for a heavy Higgs boson decaying to a pair of W bosons in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for a heavy Higgs boson decaying to a pair of W bosons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for heavy Higgs bosons decaying to a top quark pair in proton-proton collisions at $\\sqrt{s} = 13\\,\\mathrm{TeV}$",
"Search for heavy Higgs bosons decaying to a top quark pair in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of Higgs boson production and decay to the $\\tau\\tau$ final state",
"Measurement of Higgs boson production and decay to the ττ final state",
),
(
"Measurements of properties of the Higgs boson in the four-lepton final state in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurements of properties of the Higgs boson in the four-lepton final state in proton-proton collisions at √(s) = 13 TeV",
),
(
"First constraints on invisible Higgs boson decays using $\\mathrm{t}\\bar{\\mathrm{t}}\\mathrm{H}$ production at $\\sqrt{s}=13~\\mathrm{TeV}$",
"First constraints on invisible Higgs boson decays using tt̅H production at √(s) = 13 TeV",
),
(
"Combined search for gauge-mediated supersymmetry with photons in 13 TeV collisions at the CMS experiment",
"Combined search for gauge-mediated supersymmetry with photons in 13 TeV collisions at the CMS experiment",
),
(
"Evidence for WW production from double-parton interactions in proton-proton collisions at $\\sqrt{s}$ = 13 TeV",
"Evidence for WW production from double-parton interactions in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for $\\tau \\to 3\\mu$ decays using $\\tau$ leptons produced in D and B meson decays",
"Search for τ → 3μ decays using τ leptons produced in D and B meson decays",
),
(
"Search for physics beyond the standard model in events with two same-sign leptons or at least three leptons and jets in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$.",
"Search for physics beyond the standard model in events with two same-sign leptons or at least three leptons and jets in proton-proton collisions at √(s) = 13 TeV.",
),
(
"Searches for new phenomena in events with jets and high values of the $M_{\\mathrm{T2}}$ variable, including signatures with disappearing tracks, in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Searches for new phenomena in events with jets and high values of the M_T2 variable, including signatures with disappearing tracks, in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for long-lived particles using delayed jets and missing transverse momentum with proton-proton collisions at $\\sqrt{s}$ = 13 TeV",
"Search for long-lived particles using delayed jets and missing transverse momentum with proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for excited leptons decaying via contact interaction to two leptons and two jets",
"Search for excited leptons decaying via contact interaction to two leptons and two jets",
),
(
"Search for a pseudoscalar boson in the mass range from 4 to 15 GeV produced in decays of the 125 GeV Higgs boson in the final states with two muons and two nearby tracks at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for a pseudoscalar boson in the mass range from 4 to 15 GeV produced in decays of the 125 GeV Higgs boson in the final states with two muons and two nearby tracks at √(s) = 13 TeV",
),
(
"Search for boosted quark-antiquark resonances produced in association with a photon at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for boosted quark-antiquark resonances produced in association with a photon at √(s) = 13 TeV",
),
(
"Search for new physics in events with closely collimated photons and gluons",
"Search for new physics in events with closely collimated photons and gluons",
),
(
"Search for Pair Production of Vector-Like Quarks in the Fully Hadronic Channel",
"Search for Pair Production of Vector-Like Quarks in the Fully Hadronic Channel",
),
(
"Measurements of Higgs boson production via gluon fusion and vector boson fusion in the diphoton decay channel at $\\sqrt{s} = 13$ TeV",
"Measurements of Higgs boson production via gluon fusion and vector boson fusion in the diphoton decay channel at √(s) = 13 TeV",
),
(
"Search for a charged Higgs boson decaying into top and bottom quarks in proton-proton collisions at 13TeV in events with electrons or muons",
"Search for a charged Higgs boson decaying into top and bottom quarks in proton-proton collisions at 13 TeV in events with electrons or muons",
),
# CMS cms_paper_feed
(
"Combination of the W boson polarization measurements in top quark decays using ATLAS and CMS data at $\\sqrt{s} = $ 8 TeV",
"Combination of the W boson polarization measurements in top quark decays using ATLAS and CMS data at √(s) = 8 TeV",
),
(
"Measurements of production cross sections of WZ and same-sign WW boson pairs in association with two jets in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurements of production cross sections of WZ and same-sign WW boson pairs in association with two jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of CKM matrix elements in single top quark $t$-channel production in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of CKM matrix elements in single top quark t-channel production in proton-proton collisions at √(s) = 13 TeV",
),
(
"Identification of heavy, energetic, hadronically decaying particles using machine-learning techniques",
"Identification of heavy, energetic, hadronically decaying particles using machine-learning techniques",
),
(
"Search for disappearing tracks in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for disappearing tracks in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of quark- and gluon-like jet fractions using jet charge in PbPb and pp collisions at 5.02 TeV",
"Measurement of quark- and gluon-like jet fractions using jet charge in PbPb and pp collisions at 5.02 TeV",
),
(
"The production of isolated photons in PbPb and pp collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 5.02 TeV",
"The production of isolated photons in PbPb and pp collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurements of ${\\mathrm{t\\bar{t}}\\mathrm{H}} $ production and the CP structure of the Yukawa interaction between the Higgs boson and top quark in the diphoton decay channel",
"Measurements of tt̅H production and the CP structure of the Yukawa interaction between the Higgs boson and top quark in the diphoton decay channel",
),
(
"Measurement of the cross section for $\\mathrm{t\\bar{t}}$ production with additional jets and b jets in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the cross section for tt̅ production with additional jets and b jets in pp collisions at √(s) = 13 TeV",
),
(
"Study of central exclusive $\\pi^{+}\\pi^{-}$ production in proton-proton collisions at $\\sqrt{s} = $ 5.02 and 13 TeV",
"Study of central exclusive π⁺π⁻ production in proton-proton collisions at √(s) = 5.02 and 13 TeV",
),
(
"Pileup mitigation at CMS in 13 TeV data",
"Pileup mitigation at CMS in 13 TeV data",
),
(
"Measurement of single-diffractive dijet production in proton-proton collisions at $\\sqrt{s} =$ 8 TeV with the CMS and TOTEM experiments",
"Measurement of single-diffractive dijet production in proton-proton collisions at √(s) = 8 TeV with the CMS and TOTEM experiments",
),
(
"Measurement of the cross section for electroweak production of a Z boson, a photon and two jets in proton-proton collisions at $\\sqrt{s} = $ 13 TeV and constraints on anomalous quartic couplings",
"Measurement of the cross section for electroweak production of a Z boson, a photon and two jets in proton-proton collisions at √(s) = 13 TeV and constraints on anomalous quartic couplings",
),
(
"A measurement of the Higgs boson mass in the diphoton decay channel",
"A measurement of the Higgs boson mass in the diphoton decay channel",
),
(
"Measurement of the $\\Upsilon(\\text{1S}) $ pair production cross section and search for resonances decaying to $\\Upsilon(\\text{1S}) \\mu^{+}\\mu^{-}$ in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the Υ(1S) pair production cross section and search for resonances decaying to Υ(1S) μ⁺μ⁻ in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for physics beyond the standard model in events with jets and two same-sign or at least three charged leptons in proton-proton collisions at $\\sqrt{s}=$ 13 TeV",
"Search for physics beyond the standard model in events with jets and two same-sign or at least three charged leptons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for charged Higgs bosons decaying into a top and a bottom quark in the all-jet final state of pp collisions at $\\sqrt{s}=$ 13 TeV",
"Search for charged Higgs bosons decaying into a top and a bottom quark in the all-jet final state of pp collisions at √(s) = 13 TeV",
),
(
"Measurement of the associated production of a Z boson with charm or bottom quark jets in proton-proton collisions at $\\sqrt{s}=$ 13 TeV",
"Measurement of the associated production of a Z boson with charm or bottom quark jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurements of dose-rate effects in the radiation damage of plastic scintillator tiles using silicon photomultipliers",
"Measurements of dose-rate effects in the radiation damage of plastic scintillator tiles using silicon photomultipliers",
),
(
"Study of excited $\\Lambda_{\\mathrm{b}}^{0}$ states decaying to $\\Lambda_{\\mathrm{b}}^{0}\\pi^{+}\\pi^{-}$ in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Study of excited Λ⁰_b states decaying to Λ⁰_b π⁺π⁻ in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for an excited lepton that decays via a contact interaction to a lepton and two jets in proton-proton collisions at ${\\sqrt{s}} = $ 13 TeV",
"Search for an excited lepton that decays via a contact interaction to a lepton and two jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"A deep neural network to search for new long-lived particles decaying to jets",
"A deep neural network to search for new long-lived particles decaying to jets",
),
(
"Measurement of the top quark forward-backward production asymmetry and the anomalous chromoelectric and chromomagnetic moments in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the top quark forward-backward production asymmetry and the anomalous chromoelectric and chromomagnetic moments in pp collisions at √(s) = 13 TeV",
),
(
"Search for direct top squark pair production in events with one lepton, jets, and missing transverse momentum at 13 TeV with the CMS experiment",
"Search for direct top squark pair production in events with one lepton, jets, and missing transverse momentum at 13 TeV with the CMS experiment",
),
(
"Measurement of the ${\\chi_{\\mathrm{c}1}}$ and ${\\chi_{\\mathrm{c}2}}$ polarizations in proton-proton collisions at $\\sqrt{s} = $ 8 TeV",
"Measurement of the χ_c1 and χ_c2 polarizations in proton-proton collisions at √(s) = 8 TeV",
),
(
"Extraction and validation of a new set of CMS PYTHIA-8 tunes from underlying-event measurements",
"Extraction and validation of a new set of CMS PYTHIA-8 tunes from underlying-event measurements",
),
(
"Search for new physics in top quark production in dilepton final states in proton-proton collisions at $\\sqrt{s}$ = 13 TeV",
"Search for new physics in top quark production in dilepton final states in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for a low-mass $\\tau^{-}\\tau^{+}$ resonance in association with a bottom quark in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for a low-mass τ⁻τ⁺ resonance in association with a bottom quark in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for supersymmetry in final states with photons and missing transverse momentum in proton-proton collisions at 13 TeV",
"Search for supersymmetry in final states with photons and missing transverse momentum in proton-proton collisions at 13 TeV",
),
(
"Constraints on anomalous HVV couplings from the production of Higgs bosons decaying to $\\tau$ lepton pairs",
"Constraints on anomalous HVV couplings from the production of Higgs bosons decaying to τ lepton pairs",
),
(
"Performance of missing transverse momentum reconstruction in proton-proton collisions at $\\sqrt{s} = $ 13 TeV using the CMS detector",
"Performance of missing transverse momentum reconstruction in proton-proton collisions at √(s) = 13 TeV using the CMS detector",
),
(
"Search for charged Higgs bosons in the $\\mathrm{H}^{\\pm} \\to \\tau^{\\pm}\\nu_\\tau$ decay channel in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for charged Higgs bosons in the H^± → τ^±ν_τ decay channel in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of electroweak production of a W boson in association with two jets in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of electroweak production of a W boson in association with two jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"An embedding technique to determine $\\tau\\tau$ backgrounds in proton-proton collision data",
"An embedding technique to determine ττ backgrounds in proton-proton collision data",
),
(
"Search for a heavy pseudoscalar boson decaying to a Z and a Higgs boson at $\\sqrt{s} = $ 13 TeV",
"Search for a heavy pseudoscalar boson decaying to a Z and a Higgs boson at √(s) = 13 TeV",
),
(
"Combinations of single-top-quark production cross-section measurements and $|f_{\\rm LV}V_{tb}|$ determinations at $\\sqrt{s}=7$ and 8 TeV with the ATLAS and CMS experiments",
"Combinations of single-top-quark production cross-section measurements and |f_LVV_tb| determinations at √(s) = 7 and 8 TeV with the ATLAS and CMS experiments",
),
(
"Azimuthal separation in nearly back-to-back jet topologies in inclusive 2- and 3-jet events in pp collisions at $\\sqrt{s}=$ 13 TeV",
"Azimuthal separation in nearly back-to-back jet topologies in inclusive 2- and 3-jet events in pp collisions at √(s) = 13 TeV",
),
(
"Pseudorapidity distributions of charged hadrons in xenon-xenon collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 5.44 TeV",
"Pseudorapidity distributions of charged hadrons in xenon-xenon collisions at √(s_NN) = 5.44 TeV",
),
(
"Measurement of exclusive $\\rho(770)^{0}$ photoproduction in ultraperipheral pPb collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 5.02 TeV",
"Measurement of exclusive ρ⁰(770) photoproduction in ultraperipheral pPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Observation of two excited $ \\mathrm{B^{+}_{c}} $ states and measurement of the ${\\mathrm{B^{+}_{c}} \\text{(2S)}}$ mass in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Observation of two excited B⁺_c states and measurement of the B⁺_c (2S) mass in pp collisions at √(s) = 13 TeV",
),
(
"Search for W boson decays to three charged pions",
"Search for W boson decays to three charged pions",
),
(
"Charged-particle angular correlations in XeXe collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 5.44 TeV",
"Charged-particle angular correlations in XeXe collisions at √(s_NN) = 5.44 TeV",
),
(
"Search for supersymmetry in events with a photon, jets, b-jets, and missing transverse momentum in proton-proton collisions at 13 TeV",
"Search for supersymmetry in events with a photon, jets, b-jets, and missing transverse momentum in proton-proton collisions at 13 TeV",
),
(
"Measurement of electroweak WZ boson production and search for new physics in WZ + two jets events in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of electroweak WZ boson production and search for new physics in WZ + two jets events in pp collisions at √(s) = 13 TeV",
),
(
"Measurements of the ${{\\mathrm{p}}{\\mathrm{p}}\\to\\mathrm{W}\\mathrm{Z}}$ inclusive and differential production cross section and constraints on charged anomalous triple gauge couplings at ${\\sqrt{s}} = $ 13 TeV",
"Measurements of the pp → WZ inclusive and differential production cross section and constraints on charged anomalous triple gauge couplings at √(s) = 13 TeV",
),
(
"Search for dark matter produced in association with a single top quark or a top quark pair in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for dark matter produced in association with a single top quark or a top quark pair in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for the pair production of light top squarks in the $\\mathrm{e}^{\\pm}\\mu^{\\mp}$ final state in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for the pair production of light top squarks in the e^±μ^∓ final state in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurements of the Higgs boson width and anomalous HVV couplings from on-shell and off-shell production in the four-lepton final state",
"Measurements of the Higgs boson width and anomalous HVV couplings from on-shell and off-shell production in the four-lepton final state",
),
(
"Measurement of the $ \\mathrm{t\\bar{t}} $ production cross section, the top quark mass, and the strong coupling constant using dilepton events in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the tt̅ production cross section, the top quark mass, and the strong coupling constant using dilepton events in pp collisions at √(s) = 13 TeV",
),
(
"Measurement of the differential Drell-Yan cross section in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the differential Drell-Yan cross section in proton-proton collisions at √(s) = 13 TeV",
),
# CMS cms_pas_feed
(
"Measurements of differential Higgs boson production cross sections in the leptonic WW decay mode at $\\sqrt{s} = 13~\\mathrm{TeV}$",
"Measurements of differential Higgs boson production cross sections in the leptonic WW decay mode at √(s) = 13 TeV",
),
(
"A measurement of the Higgs boson mass in the diphoton decay channel",
"A measurement of the Higgs boson mass in the diphoton decay channel",
),
(
"A deep neural network for simultaneous estimation of b quark energy and resolution",
"A deep neural network for simultaneous estimation of b quark energy and resolution",
),
(
"Template measurement of the top quark forward-backward asymmetry and anomalous chromoelectric and chromomagnetic moments in the semileptonic channel at sqrt(s)=13 TeV",
"Template measurement of the top quark forward-backward asymmetry and anomalous chromoelectric and chromomagnetic moments in the semileptonic channel at sqrt(s) = 13 TeV",
),
(
"Search for supersymmetry in pp collisions at $\\sqrt{s}=13~\\mathrm{TeV}$ with $137~\\mathrm{fb}^{-1}$ in the final state with a single lepton using the sum of masses of large-radius jets",
"Search for supersymmetry in pp collisions at √(s) = 13 TeV with 137 fb⁻¹ in the final state with a single lepton using the sum of masses of large-radius jets",
),
(
"Measurement of the top quark pair production cross section in the dilepton channel including a $\\tau$ lepton in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of the top quark pair production cross section in the dilepton channel including a τ lepton in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for a narrow resonance decaying to a pair of muons in proton-proton collisions at 13 TeV",
"Search for a narrow resonance decaying to a pair of muons in proton-proton collisions at 13 TeV",
),
(
"Observation of the $\\Lambda_{\\mathrm{b}} \\to \\mathrm{J}/\\psi \\Lambda \\phi$ decay in proton-proton collisions at $\\sqrt{s}=$ 13 TeV",
"Observation of the Λ_b → J/ψΛϕ decay in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of properties of Bs0 to mu+mu- decays and search for B0 to mu+mu- with the CMS experiment",
"Measurement of properties of Bs0 to mu+mu- decays and search for B0 to mu+mu- with the CMS experiment",
),
(
"Search for supersymmetry with a compressed mass spectrum in events with a soft $\\tau$ lepton, a highly energetic jet, and large missing transverse momentum in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for supersymmetry with a compressed mass spectrum in events with a soft τ lepton, a highly energetic jet, and large missing transverse momentum in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of the cross section for $\\mathrm{t}\\bar{\\mathrm{t}}$ production with additional jets and b jets in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of the cross section for tt̅ production with additional jets and b jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for a narrow resonance in high-mass dilepton final states in proton-proton collisions using 140$~\\mathrm{fb}^{-1}$ of data at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for a narrow resonance in high-mass dilepton final states in proton-proton collisions using 140 fb⁻¹ of data at √(s) = 13 TeV",
),
(
"Search for dijet resonances in events with three jets from proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Search for dijet resonances in events with three jets from proton-proton collisions at √(s) = 13 TeV",
),
(
"First measurement of the running of the top quark mass",
"First measurement of the running of the top quark mass",
),
(
"Measurement of the associated production of a Z boson with charm or bottom quark jets in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$",
"Measurement of the associated production of a Z boson with charm or bottom quark jets in proton-proton collisions at √(s) = 13 TeV",
),
(
"Machine learning-based identification of highly Lorentz-boosted hadronically decaying particles at the CMS experiment",
"Machine learning-based identification of highly Lorentz-boosted hadronically decaying particles at the CMS experiment",
),
(
"Pileup mitigation at CMS in 13 TeV data",
"Pileup mitigation at CMS in 13 TeV data",
),
(
"Search for the standard model Higgs boson decaying to charm quarks",
"Search for the standard model Higgs boson decaying to charm quarks",
),
(
"Measurement of the jet mass distribution in highly boosted top quark decays in pp collisions at $\\sqrt{s}=13~\\text{TeV}$",
"Measurement of the jet mass distribution in highly boosted top quark decays in pp collisions at √(s) = 13 TeV",
),
(
"Search for the resonant production of a pair of Higgs bosons decaying to the bb-barZZ final state",
"Search for the resonant production of a pair of Higgs bosons decaying to the bb-barZZ final state",
),
(
"Measurement of the dependence of inclusive jet production cross sections on the anti- $k_{\\mathrm{T}}$ distance parameter in proton-proton collisions at sqrt(s) 13 TeV",
"Measurement of the dependence of inclusive jet production cross sections on the anti- k_T distance parameter in proton-proton collisions at sqrt(s) 13 TeV",
),
(
"Measurement of electroweak production of Z gamma in association with two jets in proton-proton collisions at sqrt(s) = 13 TeV",
"Measurement of electroweak production of Z gamma in association with two jets in proton-proton collisions at sqrt(s) = 13 TeV",
),
(
"Measurement of the associated production of a W boson and a charm quark at $\\sqrt{s}=8~\\mathrm{TeV}$",
"Measurement of the associated production of a W boson and a charm quark at √(s) = 8 TeV",
),
(
"Search for direct top squark pair production in events with one lepton, jets and missing transverse energy at 13 TeV",
"Search for direct top squark pair production in events with one lepton, jets and missing transverse energy at 13 TeV",
),
(
"A search for dijet resonances in proton-proton collisions at $\\sqrt{s}=13~\\mathrm{TeV}$ with a new background prediction method",
"A search for dijet resonances in proton-proton collisions at √(s) = 13 TeV with a new background prediction method",
),
# CMS cms_paper_feed
(
"Bose-Einstein correlations of charged hadrons in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Bose-Einstein correlations of charged hadrons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Mixed higher-order anisotropic flow and nonlinear response coefficients of charged particles in PbPb collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 2.76 and 5.02 TeV",
"Mixed higher-order anisotropic flow and nonlinear response coefficients of charged particles in PbPb collisions at √(s_NN) = 2.76 and 5.02 TeV",
),
(
"Strange hadron production in pp and pPb collisions at ${\\sqrt {\\smash [b]{s_{_{\\mathrm {NN}}}}}} = $ 5.02 TeV",
"Strange hadron production in pp and pPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Study of $\\mathrm{J}/\\psi$ meson production from jet fragmentation in pp collisions at $\\sqrt{s} = $ 8 TeV",
"Study of J/ψ meson production from jet fragmentation in pp collisions at √(s) = 8 TeV",
),
(
"Search for supersymmetry with a compressed mass spectrum in events with a soft $\\tau$ lepton, a highly energetic jet, and large missing transverse momentum in proton-proton collisions at $\\sqrt{s} =$ 13 TeV",
"Search for supersymmetry with a compressed mass spectrum in events with a soft τ lepton, a highly energetic jet, and large missing transverse momentum in proton-proton collisions at √(s) = 13 TeV",
),
(
"Calibration of the CMS hadron calorimeters using proton-proton collision data at $\\sqrt{s} = $ 13 TeV",
"Calibration of the CMS hadron calorimeters using proton-proton collision data at √(s) = 13 TeV",
),
(
"Running of the top quark mass from proton-proton collisions at ${\\sqrt{s}} = $ 13 TeV",
"Running of the top quark mass from proton-proton collisions at √(s) = 13 TeV",
),
(
"Evidence for WW production from double-parton interactions in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Evidence for WW production from double-parton interactions in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for long-lived particles using delayed photons in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for long-lived particles using delayed photons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of the $\\mathrm{t\\bar{t}}\\mathrm{b\\bar{b}}$ production cross section in the all-jet final state in pp collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of the tt̅bb̅ production cross section in the all-jet final state in pp collisions at √(s) = 13 TeV",
),
(
"Search for electroweak production of a vector-like T quark using fully hadronic final states",
"Search for electroweak production of a vector-like T quark using fully hadronic final states",
),
(
"Measurements of differential Z boson production cross sections in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurements of differential Z boson production cross sections in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for low mass vector resonances decaying into quark-antiquark pairs in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for low mass vector resonances decaying into quark-antiquark pairs in proton-proton collisions at √(s) = 13 TeV",
),
(
"Searches for physics beyond the standard model with the ${M_{\\mathrm{T2}}}$ variable in hadronic final states with and without disappearing tracks in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Searches for physics beyond the standard model with the M_T2 variable in hadronic final states with and without disappearing tracks in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for a charged Higgs boson decaying into top and bottom quarks in proton-proton collisions at $\\sqrt{s} = $ 13 TeV in events with electrons or muons",
"Search for a charged Higgs boson decaying into top and bottom quarks in proton-proton collisions at √(s) = 13 TeV in events with electrons or muons",
),
(
"Search for supersymmetry using Higgs boson to diphoton decays at $\\sqrt{s} = $ 13 TeV",
"Search for supersymmetry using Higgs boson to diphoton decays at √(s) = 13 TeV",
),
(
"Search for production of four top quarks in final states with same-sign or multiple leptons in proton-proton collisions at $\\sqrt{s}= $ 13 TeV",
"Search for production of four top quarks in final states with same-sign or multiple leptons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for supersymmetry in proton-proton collisions at 13 TeV in final states with jets and missing transverse momentum",
"Search for supersymmetry in proton-proton collisions at 13 TeV in final states with jets and missing transverse momentum",
),
(
"Search for dark photons in decays of Higgs bosons produced in association with Z bosons in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for dark photons in decays of Higgs bosons produced in association with Z bosons in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for dark matter particles produced in association with a Higgs boson in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for dark matter particles produced in association with a Higgs boson in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for heavy Higgs bosons decaying to a top quark pair in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for heavy Higgs bosons decaying to a top quark pair in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for direct pair production of supersymmetric partners to the $\\tau$ lepton in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for direct pair production of supersymmetric partners to the τ lepton in proton-proton collisions at √(s) = 13 TeV",
),
(
"Measurement of top quark pair production in association with a Z boson in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Measurement of top quark pair production in association with a Z boson in proton-proton collisions at √(s) = 13 TeV",
),
(
"Search for anomalous triple gauge couplings in WW and WZ production in lepton + jet events in proton-proton collisions at $\\sqrt{s} = $ 13 TeV",
"Search for anomalous triple gauge couplings in WW and WZ production in lepton + jet events in proton-proton collisions at √(s) = 13 TeV",
),
# ATLAS atlas_conf_feed
(
"Search for bottom-squark pair production with the ATLAS detector in final states containing Higgs bosons, $b$-jets and missing transverse momentum",
"Search for bottom-squark pair production with the ATLAS detector in final states containing Higgs bosons, b-jets and missing transverse momentum",
),
(
"Search for heavy neutral Higgs bosons produced in association with $b$-quarks and decaying to $b$-quarks at $\\sqrt{s}=13$~TeV with the ATLAS detector",
"Search for heavy neutral Higgs bosons produced in association with b-quarks and decaying to b-quarks at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of the CP violation phase $\\phi_{s}$ in $B_{s}\\to J/\\psi \\phi$ decays in ATLAS at 13 TeV",
"Measurement of the CP violation phase ϕ_s in B_s → J/ψϕ decays in ATLAS at 13 TeV",
),
(
"Search for electroweak production of charginos and sleptons decaying in final states with two leptons and missing transverse momentum in $\\sqrt{s}=13$ TeV $pp$ collisions using the ATLAS detector",
"Search for electroweak production of charginos and sleptons decaying in final states with two leptons and missing transverse momentum in √(s) = 13 TeV pp collisions using the ATLAS detector",
),
(
"Search for New Phenomena in Dijet Events using 139 fb$^{−1}$ of $pp$ collisions at $\\sqrt{s}$ = 13TeV collected with the ATLAS Detector",
"Search for New Phenomena in Dijet Events using 139 fb^−1 of pp collisions at √(s) = 13 TeV collected with the ATLAS Detector",
),
(
"Search for long-lived, massive particles in events with a displaced vertex and a displaced muon in $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Search for long-lived, massive particles in events with a displaced vertex and a displaced muon in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Combined measurements of Higgs boson production and decay using up to $80$ fb$^{-1}$ of proton--proton collision data at $\\sqrt{s}=$ 13 TeV collected with the ATLAS experiment",
"Combined measurements of Higgs boson production and decay using up to 80 fb⁻¹ of proton–proton collision data at √(s) = 13 TeV collected with the ATLAS experiment",
),
(
"Measurement of Higgs boson production in association with a $t\\overline t$ pair in the diphoton decay channel using 139~fb$^{-1}$ of LHC data collected at $\\sqrt{s} = 13$~TeV by the ATLAS experiment",
"Measurement of Higgs boson production in association with a tt pair in the diphoton decay channel using 139 fb⁻¹ of LHC data collected at √(s) = 13 TeV by the ATLAS experiment",
),
(
"Search for diboson resonances in hadronic final states in 139 fb$^{-1}$ of $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Search for diboson resonances in hadronic final states in 139 fb⁻¹ of pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Observation of light-by-light scattering in ultraperipheral Pb+Pb collisions with the ATLAS detector",
"Observation of light-by-light scattering in ultraperipheral Pb+Pb collisions with the ATLAS detector",
),
(
"Search for high-mass dilepton resonances using $139\\,\\mathrm{fb}^{-1}$ of $pp$ collision data collected at $\\sqrt{s}=13\\,\\mathrm{TeV}$ with the ATLAS detector",
"Search for high-mass dilepton resonances using 139 fb⁻¹ of pp collision data collected at √(s) = 13 TeV with the ATLAS detector",
),
(
"Calibration of the $b$-tagging efficiency on charm jets using a sample of $W$+$c$ events with $\\sqrt{s}$ = 13 TeV ATLAS data",
"Calibration of the b-tagging efficiency on charm jets using a sample of W+c events with √(s) = 13 TeV ATLAS data",
),
(
"Combination of searches for invisible Higgs boson decays with the ATLAS experiment",
"Combination of searches for invisible Higgs boson decays with the ATLAS experiment",
),
(
"Measurements of $VH$, $H \\to b\\bar{b}$ production as a function of the vector boson transverse momentum in 13 TeV pp collisions with the ATLAS detector",
"Measurements of VH, H → bb̅ production as a function of the vector boson transverse momentum in 13 TeV pp collisions with the ATLAS detector",
),
(
"Search for boosted resonances decaying to two b-quarks and produced in association with a jet at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for boosted resonances decaying to two b-quarks and produced in association with a jet at √(s) = 13 TeV with the ATLAS detector",
),
(
"Constraints on mediator-based dark matter models using $\\sqrt s = 13$ TeV $pp$ collisions at the LHC with the ATLAS detector",
"Constraints on mediator-based dark matter models using √(s) = 13 TeV pp collisions at the LHC with the ATLAS detector",
),
(
"Dijet azimuthal correlations and conditional yields in $p\\!p$ and $p$+Pb collisions at $\\sqrt{s_{_\\text{NN}}}$~=~5.02 TeV with the ATLAS detector",
"Dijet azimuthal correlations and conditional yields in pp and p+Pb collisions at √(s_NN) = 5.02 TeV with the ATLAS detector",
),
(
"Search for top quark decays t\\rightarrowHq with 36 fb^{−1} of pp collision data at \\sqrt{s} = 13 TeV with the ATLAS detector",
"Search for top quark decays t → Hq with 36 fb^−1 of pp collision data at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurements of inclusive and differential cross-sections of $t\\bar{t}\\gamma$ production in leptonic final states in a fiducial volume at $\\sqrt{s}=13~\\text{TeV}$ in ATLAS",
"Measurements of inclusive and differential cross-sections of tt̅γ production in leptonic final states in a fiducial volume at √(s) = 13 TeV in ATLAS",
),
(
"Measurement of the $t\\bar{t}W$ and $t\\bar{t}Z$ cross sections in proton–proton collisions at $\\sqrt{s}$ = 13 TeV with the ATLAS detector",
"Measurement of the tt̅W and tt̅Z cross sections in proton–proton collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Study of the rare decays of B0s and B0 into muon pairs from data collected during 2015 and 2016 with the ATLAS detector",
"Study of the rare decays of B0s and B0 into muon pairs from data collected during 2015 and 2016 with the ATLAS detector",
),
(
"Calibration of the ATLAS $b$-tagging algorithm in $t\\bar{t}$ semi-leptonic events",
"Calibration of the ATLAS b-tagging algorithm in tt̅ semi-leptonic events",
),
(
"Search for charged lepton-flavour violation in top-quark decays at the LHC with the ATLAS detector",
"Search for charged lepton-flavour violation in top-quark decays at the LHC with the ATLAS detector",
),
(
"Combination of searches for Higgs boson pairs in $pp$ collisions at 13 TeV with the ATLAS experiment.",
"Combination of searches for Higgs boson pairs in pp collisions at 13 TeV with the ATLAS experiment.",
),
(
"Search for direct chargino pair production with W-boson mediated decays in events with two leptons and missing transverse momentum at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Search for direct chargino pair production with W-boson mediated decays in events with two leptons and missing transverse momentum at √(s) = 13 TeV with the ATLAS detector",
),
# ATLAS atlas_paper_feed
(
"Observation of light-by-light scattering in ultraperipheral Pb+Pb collisions with the ATLAS detector",
"Observation of light-by-light scattering in ultraperipheral Pb+Pb collisions with the ATLAS detector",
),
(
"Evidence for the production of three massive vector bosons with the ATLAS detector",
"Evidence for the production of three massive vector bosons with the ATLAS detector",
),
(
"Measurement of the production cross section for a Higgs boson in association with a vector boson in the $H \\rightarrow WW^{\\ast} \\rightarrow \\ell\\nu\\ell\\nu$ channel in $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Measurement of the production cross section for a Higgs boson in association with a vector boson in the H → WW^∗ → ℓνℓν channel in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurements of top-quark pair spin correlations in the $e\\mu$ channel at $\\sqrt{s} = 13$ TeV using $pp$ collisions in the ATLAS detector",
"Measurements of top-quark pair spin correlations in the eμ channel at √(s) = 13 TeV using pp collisions in the ATLAS detector",
),
(
"Search for high-mass dilepton resonances using 139 fb$^{-1}$ of $pp$ collision data collected at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for high-mass dilepton resonances using 139 fb⁻¹ of pp collision data collected at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of $VH$, $H\\to b\\bar{b}$ production as a function of the vector-boson transverse momentum in 13 TeV $pp$ collisions with the ATLAS detector",
"Measurement of VH, H → bb̅ production as a function of the vector-boson transverse momentum in 13 TeV pp collisions with the ATLAS detector",
),
(
"Measurement of jet-substructure observables in top quark, $W$ boson and light jet production in proton-proton collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Measurement of jet-substructure observables in top quark, W boson and light jet production in proton-proton collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of prompt photon production in $\\sqrt{s_\\mathrm{NN}} = 8.16$ TeV $p$+Pb collisions with ATLAS",
"Measurement of prompt photon production in √(s_NN) = 8.16 TeV p+Pb collisions with ATLAS",
),
(
"Constraints on mediator-based dark matter and scalar dark energy models using $\\sqrt{s}= 13$ TeV $pp$ collision data collected by the ATLAS detector",
"Constraints on mediator-based dark matter and scalar dark energy models using √(s) = 13 TeV pp collision data collected by the ATLAS detector",
),
(
"Search for heavy particles decaying into a top-quark pair in the fully hadronic final state in $pp$ collisions at $\\sqrt{s} =$13 TeV with the ATLAS detector",
"Search for heavy particles decaying into a top-quark pair in the fully hadronic final state in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Comparison of fragmentation functions for light-quark- and gluon-dominated jets from $pp$ and Pb+Pb collisions in ATLAS",
"Comparison of fragmentation functions for light-quark- and gluon-dominated jets from pp and Pb+Pb collisions in ATLAS",
),
(
"Searches for third-generation scalar leptoquarks in $\\sqrt{s} = 13$ TeV $pp$ collisions with the ATLAS detector",
"Searches for third-generation scalar leptoquarks in √(s) = 13 TeV pp collisions with the ATLAS detector",
),
(
"Combinations of single-top-quark production cross-section measurements and $|f_{\\rm LV}V_{tb}|$ determinations at $\\sqrt{s}=7$ and 8 TeV with the ATLAS and CMS experiments",
"Combinations of single-top-quark production cross-section measurements and |f_LVV_tb| determinations at √(s) = 7 and 8 TeV with the ATLAS and CMS experiments",
),
(
"Measurement of the four-lepton invariant mass spectrum in 13 TeV proton-proton collisions with the ATLAS detector",
"Measurement of the four-lepton invariant mass spectrum in 13 TeV proton-proton collisions with the ATLAS detector",
),
(
"Measurement of $W^{\\pm}Z$ production cross sections and gauge boson polarisation in $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Measurement of W^±Z production cross sections and gauge boson polarisation in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Electron reconstruction and identification in the ATLAS experiment using the 2015 and 2016 LHC proton-proton collision data at $\\sqrt{s} = 13$ TeV",
"Electron reconstruction and identification in the ATLAS experiment using the 2015 and 2016 LHC proton-proton collision data at √(s) = 13 TeV",
),
(
"Search for long-lived neutral particles in $pp$ collisions at $\\sqrt{s} = 13$ TeV that decay into displaced hadronic jets in the ATLAS calorimeter",
"Search for long-lived neutral particles in pp collisions at √(s) = 13 TeV that decay into displaced hadronic jets in the ATLAS calorimeter",
),
(
"Search for heavy charged long-lived particles in the ATLAS detector in 31.6 fb$^{-1}$ of proton-proton collision data at $\\sqrt{s} = 13$ TeV",
"Search for heavy charged long-lived particles in the ATLAS detector in 31.6 fb⁻¹ of proton-proton collision data at √(s) = 13 TeV",
),
(
"Searches for scalar leptoquarks and differential cross-section measurements in dilepton-dijet events in proton-proton collisions at a centre-of-mass energy of $\\sqrt{s} = 13$ TeV with the ATLAS experiment",
"Searches for scalar leptoquarks and differential cross-section measurements in dilepton-dijet events in proton-proton collisions at a centre-of-mass energy of √(s) = 13 TeV with the ATLAS experiment",
),
(
"Search for low-mass resonances decaying into two jets and produced in association with a photon using $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Search for low-mass resonances decaying into two jets and produced in association with a photon using pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Dijet azimuthal correlations and conditional yields in $pp$ and $p$+Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV with the ATLAS detector",
"Dijet azimuthal correlations and conditional yields in pp and p+Pb collisions at √(s_NN) = 5.02 TeV with the ATLAS detector",
),
(
"Measurement of the ratio of cross sections for inclusive isolated-photon production in $pp$ collisions at $\\sqrt{s}=13$ and $8$ TeV with the ATLAS detector",
"Measurement of the ratio of cross sections for inclusive isolated-photon production in pp collisions at √(s) = 13 and 8 TeV with the ATLAS detector",
),
(
"Search for scalar resonances decaying into $\\mu^{+}\\mu^{-}$ in events with and without $b$-tagged jets produced in proton-proton collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for scalar resonances decaying into μ⁺μ⁻ in events with and without b-tagged jets produced in proton-proton collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of the $t\\bar{t}Z$ and $t\\bar{t}W$ cross sections in proton-proton collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Measurement of the tt̅Z and tt̅W cross sections in proton-proton collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Search for top-quark decays $t \\rightarrow Hq$ with 36 fb$^{-1}$ of $pp$ collision data at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for top-quark decays t → Hq with 36 fb⁻¹ of pp collision data at √(s) = 13 TeV with the ATLAS detector",
),
# ATLAS atlas_paper_feed
(
"Evidence for electroweak production of two jets in association with a $Z\\gamma$ pair in $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Evidence for electroweak production of two jets in association with a Zγ pair in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of the $t\\bar{t}$ production cross-section and lepton differential distributions in $e\\mu$ dilepton events from $pp$ collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Measurement of the tt̅ production cross-section and lepton differential distributions in eμ dilepton events from pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Search for new resonances in mass distributions of jet pairs using 139 fb$^{-1}$ of $pp$ collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for new resonances in mass distributions of jet pairs using 139 fb⁻¹ of pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Determination of jet calibration and energy resolution in proton-proton collisions at $\\sqrt{s}$ = 8 TeV using the ATLAS detector",
"Determination of jet calibration and energy resolution in proton-proton collisions at √(s) = 8 TeV using the ATLAS detector",
),
(
"Measurement of $J/\\psi$ production in association with a $W^\\pm$ boson with $pp$ data at 8 TeV",
"Measurement of J/ψ production in association with a W^± boson with pp data at 8 TeV",
),
(
"Search for the Higgs boson decays $H \\to ee$ and $H \\to e\\mu$ in $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS detector",
"Search for the Higgs boson decays H → ee and H → eμ in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Search for direct production of electroweakinos in final states with one lepton, missing transverse momentum and a Higgs boson decaying into two $b$-jets in $pp$ collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Search for direct production of electroweakinos in final states with one lepton, missing transverse momentum and a Higgs boson decaying into two b-jets in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Search for squarks and gluinos in final states with same-sign leptons and jets using 139 fb$^{-1}$ of data collected with the ATLAS detector",
"Search for squarks and gluinos in final states with same-sign leptons and jets using 139 fb⁻¹ of data collected with the ATLAS detector",
),
(
"Combined measurements of Higgs boson production and decay using up to $80$ fb$^{-1}$ of proton-proton collision data at $\\sqrt{s}=$ 13 TeV collected with the ATLAS experiment",
"Combined measurements of Higgs boson production and decay using up to 80 fb⁻¹ of proton-proton collision data at √(s) = 13 TeV collected with the ATLAS experiment",
),
(
"Measurement of azimuthal anisotropy of muons from charm and bottom hadrons in $pp$ collisions at $\\sqrt{s}=13$ TeV with the ATLAS detector",
"Measurement of azimuthal anisotropy of muons from charm and bottom hadrons in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Search for light long-lived neutral particles produced in $pp$ collisions at $\\sqrt{s} =$ 13 TeV and decaying into collimated leptons or light hadrons with the ATLAS detector",
"Search for light long-lived neutral particles produced in pp collisions at √(s) = 13 TeV and decaying into collimated leptons or light hadrons with the ATLAS detector",
),
(
"Performance of electron and photon triggers in ATLAS during LHC Run 2",
"Performance of electron and photon triggers in ATLAS during LHC Run 2",
),
(
"Search for flavour-changing neutral currents in processes with one top quark and a photon using 81 fb$^{-1}$ of $pp$ collisions at $\\sqrt{s} = 13$ TeV with the ATLAS experiment",
"Search for flavour-changing neutral currents in processes with one top quark and a photon using 81 fb⁻¹ of pp collisions at √(s) = 13 TeV with the ATLAS experiment",
),
(
"Search for electroweak production of charginos and sleptons decaying into final states with two leptons and missing transverse momentum in $\\sqrt{s}=13$ TeV $pp$ collisions using the ATLAS detector",
"Search for electroweak production of charginos and sleptons decaying into final states with two leptons and missing transverse momentum in √(s) = 13 TeV pp collisions using the ATLAS detector",
),
(
"Measurements of top-quark pair differential and double-differential cross-sections in the $\\ell$+jets channel with $pp$ collisions at $\\sqrt{s}=13$ TeV using the ATLAS detector",
"Measurements of top-quark pair differential and double-differential cross-sections in the ℓ+jets channel with pp collisions at √(s) = 13 TeV using the ATLAS detector",
),
(
"Search for non-resonant Higgs boson pair production in the $bb\\ell\\nu\\ell\\nu$ final state with the ATLAS detector in $pp$ collisions at $\\sqrt{s} = 13$ TeV",
"Search for non-resonant Higgs boson pair production in the bbℓνℓν final state with the ATLAS detector in pp collisions at √(s) = 13 TeV",
),
(
"Measurement of angular and momentum distributions of charged particles within and around jets in Pb+Pb and $pp$ collisions at $\\sqrt{s_{\\mathrm{NN}}} = 5.02$ TeV with the ATLAS detector",
"Measurement of angular and momentum distributions of charged particles within and around jets in Pb+Pb and pp collisions at √(s_NN) = 5.02 TeV with the ATLAS detector",
),
(
"Search for bottom-squark pair production with the ATLAS detector in final states containing Higgs bosons, $b$-jets and missing transverse momentum",
"Search for bottom-squark pair production with the ATLAS detector in final states containing Higgs bosons, b-jets and missing transverse momentum",
),
(
"Measurement of the inclusive isolated-photon cross section in $pp$ collisions at $\\sqrt{s}=13$ TeV using 36 fb$^{-1}$ of ATLAS data",
"Measurement of the inclusive isolated-photon cross section in pp collisions at √(s) = 13 TeV using 36 fb⁻¹ of ATLAS data",
),
(
"Electron and photon performance measurements with the ATLAS detector using the 2015-2017 LHC proton-proton collision data",
"Electron and photon performance measurements with the ATLAS detector using the 2015-2017 LHC proton-proton collision data",
),
(
"Measurement of $K_S^0$ and $\\Lambda^0$ production in $t \\bar{t}$ dileptonic events in $pp$ collisions at $\\sqrt{s} =$ 7 TeV with the ATLAS detector",
"Measurement of K⁰_S and Λ⁰ production in tt̅ dileptonic events in pp collisions at √(s) = 7 TeV with the ATLAS detector",
),
(
"Measurement of $W^\\pm$ boson production in Pb+Pb collisions at $\\sqrt{s_\\mathrm{NN}} = 5.02$ TeV with the ATLAS detector",
"Measurement of W^± boson production in Pb+Pb collisions at √(s_NN) = 5.02 TeV with the ATLAS detector",
),
(
"Search for displaced vertices of oppositely charged leptons from decays of long-lived particles in $pp$ collisions at $\\sqrt{s}$ = 13 TeV with the ATLAS detector",
"Search for displaced vertices of oppositely charged leptons from decays of long-lived particles in pp collisions at √(s) = 13 TeV with the ATLAS detector",
),
(
"Measurement of the jet mass in high transverse momentum $Z(\\rightarrow b\\overline{b})\\gamma$ production at $\\sqrt{s}= 13$ TeV using the ATLAS detector",
"Measurement of the jet mass in high transverse momentum Z( → bb)γ production at √(s) = 13 TeV using the ATLAS detector",
),
(
"Measurement of the inclusive cross-section for the production of jets in association with a $Z$ boson in proton-proton collisions at 8 TeV using the ATLAS detector",
"Measurement of the inclusive cross-section for the production of jets in association with a Z boson in proton-proton collisions at 8 TeV using the ATLAS detector",
),
# ALICE alice_paper_feed
(
"One-dimensional charged kaon femtoscopy in p-Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV",
"One-dimensional charged kaon femtoscopy in p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Investigations of anisotropic flow using multi-particle azimuthal correlations in pp, p$-$Pb, Xe$-$Xe, and Pb$-$Pb collisions at the LHC",
"Investigations of anisotropic flow using multi-particle azimuthal correlations in pp, p-Pb, Xe-Xe, and Pb-Pb collisions at the LHC",
),
(
"Multiplicity dependence of (anti-)deuteron production in pp collisions at $\\sqrt{s}$ = 7 TeV",
"Multiplicity dependence of (anti-)deuteron production in pp collisions at √(s) = 7 TeV",
),
(
"Calibration of the photon spectrometer PHOS of the ALICE experiment",
"Calibration of the photon spectrometer PHOS of the ALICE experiment",
),
(
"Measurement of D$^0$, D$^+$, D$^*$ and D$_s$ production in pp collisions at $\\sqrt{s}$ = 5.02 TeV",
"Measurement of D⁰, D⁺, D* and D_s production in pp collisions at √(s) = 5.02 TeV",
),
(
"Real-time data processing in the ALICE High Level Trigger at the LHC",
"Real-time data processing in the ALICE High Level Trigger at the LHC",
),
(
"Event-shape and multiplicity dependence of freeze-out radii in pp collisions at $\\sqrt{s}$ = 7 TeV",
"Event-shape and multiplicity dependence of freeze-out radii in pp collisions at √(s) = 7 TeV",
),
(
"Study of J/$\\psi$ azimuthal anisotropy at forward rapidity in Pb-Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV",
"Study of J/ψ azimuthal anisotropy at forward rapidity in Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Charged-particle pseudorapidity density at mid-rapidity in p-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 8.16 TeV",
"Charged-particle pseudorapidity density at mid-rapidity in p-Pb collisions at √(s_NN) = 8.16 TeV",
),
(
"Jet fragmentation transverse momentum measurements from di-hadron correlations in $\\sqrt{s}$ = 7 TeV pp and $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV p-Pb collisions",
"Jet fragmentation transverse momentum measurements from di-hadron correlations in √(s) = 7 TeV pp and √(s_NN) = 5.02 TeV p-Pb collisions",
),
(
"$\\Lambda_{\\rm c}^{+}$ production in Pb-Pb collisions at $\\sqrt{s_{\\rm NN}}=5.02$ TeV",
"Λ⁺_c production in Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Event-shape engineering for the D-meson elliptic flow in mid-central Pb-Pb collisions at $\\sqrt{s_{\\rm NN}}=5.02$ TeV",
"Event-shape engineering for the D-meson elliptic flow in mid-central Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Energy dependence of exclusive $J/\\psi$ photoproduction off protons in ultra-peripheral p-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Energy dependence of exclusive J/ψ photoproduction off protons in ultra-peripheral p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Charged jet cross section and fragmentation in proton-proton collisions at $\\sqrt{s}$ = 7 TeV",
"Charged jet cross section and fragmentation in proton-proton collisions at √(s) = 7 TeV",
),
(
"Measuring $\\rm{K}^{0}\\rm{K}^{\\pm}$ interactions using pp collisions at $\\sqrt{s}$ = 7 TeV",
"Measuring K⁰K^± interactions using pp collisions at √(s) = 7 TeV",
),
(
"Multiplicity dependence of light-flavor hadron production in pp collisions at $\\sqrt{s}$ = 7 TeV",
"Multiplicity dependence of light-flavor hadron production in pp collisions at √(s) = 7 TeV",
),
(
"Medium modification of the shape of small-radius jets in central Pb-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 2.76 TeV",
"Medium modification of the shape of small-radius jets in central Pb-Pb collisions at √(s_NN) = 2.76 TeV",
),
(
"Measurement of dielectron production in central Pb-Pb collisions at $\\sqrt{{\\textit{s}}_{\\mathrm{NN}}}$ = 2.76 TeV",
"Measurement of dielectron production in central Pb-Pb collisions at √(s_NN) = 2.76 TeV",
),
(
"p-p, p-$\\Lambda$ and $\\Lambda$-$\\Lambda$ correlations studied via femtoscopy in pp reactions at $\\sqrt{s}$ = 7 TeV",
"p-p, p-Λ and Λ-Λ correlations studied via femtoscopy in pp reactions at √(s) = 7 TeV",
),
(
"Dielectron and heavy-quark production in inelastic and high-multiplicity proton-proton collisions at $\\sqrt{s} = 13$ TeV",
"Dielectron and heavy-quark production in inelastic and high-multiplicity proton-proton collisions at √(s) = 13 TeV",
),
(
"Centrality and pseudorapidity dependence of the charged-particle multiplicity density in Xe-Xe collisions at $\\sqrt{s_{\\rm NN}}$ = 5.44 TeV",
"Centrality and pseudorapidity dependence of the charged-particle multiplicity density in Xe-Xe collisions at √(s_NN) = 5.44 TeV",
),
(
"Azimuthal anisotropy of heavy-flavour decay electrons in p-Pb collisions at $\\sqrt{s_{NN}}$ = 5.02 TeV",
"Azimuthal anisotropy of heavy-flavour decay electrons in p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Non-Flow and Flow studies with differential transverse momentum and number density correlations in p-Pb and Pb-Pb at LHC",
"Non-Flow and Flow studies with differential transverse momentum and number density correlations in p-Pb and Pb-Pb at LHC",
),
(
"Direct photon elliptic flow in Pb-Pb collisions at $\\sqrt{s_{NN}}$ = 2.76 TeV",
"Direct photon elliptic flow in Pb-Pb collisions at √(s_NN) = 2.76 TeV",
),
(
"Suppression of $\\Lambda(1520)$ resonance production in central Pb-Pb collisions at $\\sqrt{s_{NN}}$ = 2.76 TeV",
"Suppression of Λ(1520) resonance production in central Pb-Pb collisions at √(s_NN) = 2.76 TeV",
),
# ALICE alice_paper_feed
(
"Production of charged pions, kaons and (anti-)protons in Pb-Pb and inelastic pp collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Production of charged pions, kaons and (anti-)protons in Pb-Pb and inelastic pp collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurement of electrons from semileptonic heavy-flavour hadron decays at mid-rapidity in pp and Pb-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Measurement of electrons from semileptonic heavy-flavour hadron decays at mid-rapidity in pp and Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurement of the (anti-)$^{3}$He elliptic flow in Pb-Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV",
"Measurement of the (anti-)^3He elliptic flow in Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurements of inclusive jet spectra in pp and central Pb–Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Measurements of inclusive jet spectra in pp and central Pb–Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Studies of J/$\\psi$ production at forward rapidity in Pb-Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV",
"Studies of J/ψ production at forward rapidity in Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurement of $\\Lambda$(1520) production in pp collisions at $\\sqrt{s}$ = 7 TeV and p-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Measurement of Λ(1520) production in pp collisions at √(s) = 7 TeV and p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Global polarization of $\\Lambda$ and $\\overline{\\Lambda}$ hyperons in Pb-Pb collisions at the LHC",
"Global polarization of Λ and Λ hyperons in Pb-Pb collisions at the LHC",
),
(
"Multiplicity dependence of (multi-)strange hadron production in proton-proton collisions at $\\sqrt{s}$ = 13 TeV",
"Multiplicity dependence of (multi-)strange hadron production in proton-proton collisions at √(s) = 13 TeV",
),
(
"$^{3}_{\\Lambda}\\mathrm{H}$ and $^{3}_{\\overline{\\Lambda}}\\mathrm{\\overline{H}}$ lifetime measurement in Pb-Pb collisions at \\newline $\\sqrt{s_{\\mathrm{NN}}} = $ 5.02 TeV via two-body decay",
"^3_ΛH and ^3_ΛH lifetime measurement in Pb-Pb collisions at √(s_NN) = 5.02 TeV via two-body decay",
),
(
"Measurement of Υ(1S) elliptic flow at forward rapidity in Pb-Pb collisions at $\\sqrt{s_{NN}}$ = 5.02TeV",
"Measurement of Υ(1S) elliptic flow at forward rapidity in Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Measurement of prompt D$^{0}$, D$^{+}$, D$^{∗+}$, and D$^{+}_{s}$ production in p$-$Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 5.02 TeV",
"Measurement of prompt D⁰, D⁺, D*⁺, and D⁺_s production in p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Multiplicity dependence of light (anti-)nuclei production in p-Pb collisions at $\\sqrt{s_{\\rm{NN}}}$ = 5.02 TeV",
"Multiplicity dependence of light (anti-)nuclei production in p-Pb collisions at √(s_NN) = 5.02 TeV",
),
(
"Scattering studies with low-energy kaon-proton femtoscopy in proton-proton collisions at the LHC",
"Scattering studies with low-energy kaon-proton femtoscopy in proton-proton collisions at the LHC",
),
(
"Measurement of the inclusive isolated photon production cross section in pp collisions at $\\sqrt{s}$ = 7 TeV",
"Measurement of the inclusive isolated photon production cross section in pp collisions at √(s) = 7 TeV",
),
(
"Inclusive J/$\\psi$ production at mid-rapidity in pp collisions at $\\sqrt{s}$ = 5.02 TeV",
"Inclusive J/ψ production at mid-rapidity in pp collisions at √(s) = 5.02 TeV",
),
(
"Study of the $\\Lambda$-$\\Lambda$ interaction with femtoscopy correlations in pp and p-Pb collisions at the LHC",
"Study of the Λ-Λ interaction with femtoscopy correlations in pp and p-Pb collisions at the LHC",
),
(
"Charged-particle production as a function of multiplicity and transverse spherocity in pp collisions at $\\sqrt{s} =5.02$ and 13 TeV",
"Charged-particle production as a function of multiplicity and transverse spherocity in pp collisions at √(s) = 5.02 and 13 TeV",
),
(
"Exploration of jet substructure using iterative declustering in pp and Pb-Pb collisions at LHC energies",
"Exploration of jet substructure using iterative declustering in pp and Pb-Pb collisions at LHC energies",
),
(
"Measurement of the production of charm jets tagged with D$^{0}$ mesons in pp collisions at $\\sqrt{s}$= 7 TeV",
"Measurement of the production of charm jets tagged with D⁰ mesons in pp collisions at √(s) = 7 TeV",
),
(
"First observation of an attractive interaction between a proton and a multi-strange baryon",
"First observation of an attractive interaction between a proton and a multi-strange baryon",
),
(
"Measurement of jet radial profiles in Pb$-$Pb collisions at $\\sqrt{s_{\\rm NN}}$ = 2.76 TeV",
"Measurement of jet radial profiles in Pb-Pb collisions at √(s_NN) = 2.76 TeV",
),
(
"Production of muons from heavy-flavour hadron decays in pp collisions at $\\sqrt{s}=5.02$ TeV",
"Production of muons from heavy-flavour hadron decays in pp collisions at √(s) = 5.02 TeV",
),
(
"Measurement of charged jet cross section in pp collisions at $\\sqrt{s}=5.02$ TeV",
"Measurement of charged jet cross section in pp collisions at √(s) = 5.02 TeV",
),
(
"Coherent J/$\\psi$ photoproduction at forward rapidity in ultra-peripheral Pb-Pb collisions at $\\sqrt{s_{\\rm{NN}}}=5.02$ TeV",
"Coherent J/ψ photoproduction at forward rapidity in ultra-peripheral Pb-Pb collisions at √(s_NN) = 5.02 TeV",
),
# LHCb lhcb_paper_feed
(
"Measurements of $CP$ asymmetries in charmless four-body $\\Lambda^0_b$ and $\\Xi_b^0$ decays",
"Measurements of CP asymmetries in charmless four-body Λ⁰_b and Ξ⁰_b decays",
),
(
"Observation of an excited $B_c^+$ state",
"Observation of an excited B⁺_c state",
),
(
"Near-threshold $D\\bar{D}$ spectroscopy and observation of a new charmonium state",
"Near-threshold DD̅ spectroscopy and observation of a new charmonium state",
),
(
"Search for lepton-universality violation in $B^+\\to K^+\\ell^+\\ell^-$ decays",
"Search for lepton-universality violation in B⁺ → K⁺ℓ⁺ℓ⁻ decays",
),
(
"Observation of $C\\!P$ violation in charm decays",
"Observation of CP violation in charm decays",
),
(
"Measurement of the $CP$-violating phase $\\phi_s$ from $B_{s}^{0}\\to J/\\psi\\pi^+\\pi⁻$ decays in 13 TeV $pp$ collisions",
"Measurement of the CP-violating phase ϕ_s from B⁰_s → J/ψπ⁺π⁻ decays in 13 TeV pp collisions",
),
(
"Measurement of the mass difference between neutral charm-meson eigenstates",
"Measurement of the mass difference between neutral charm-meson eigenstates",
),
(
"Search for $CP$ violation in $D^+_s\\to K_S^0\\pi^+$, $D^+\\to K_S^0K^+$ and $D^+\\to\\phi\\pi^+$ decays",
"Search for CP violation in D⁺_s → K⁰_S π⁺, D⁺ → K⁰_S K⁺ and D⁺ → ϕπ⁺ decays",
),
(
"Amplitude analysis of $B^{0}_{s} \\rightarrow K^{0}_{\\textrm{S}} K^{\\pm}\\pi^{\\mp}$ decays",
"Amplitude analysis of B⁰_s → K⁰_S K^±π^∓ decays",
),
(
"Measurement of $b$-hadron fractions in 13 TeV $pp$ collisions",
"Measurement of b-hadron fractions in 13 TeV pp collisions",
),
(
"Dalitz Plot analysis of the $D^+ \\to K^- K^+ K^+$ decay",
"Dalitz Plot analysis of the D⁺ → K⁻ K⁺ K⁺ decay",
),
(
"Observation of $B^0_{(s)} \\to J/\\psi p \\overline{p}$ decays and precision measurements of the $B^0_{(s)}$ masses",
"Observation of B⁰_s → J/ψ pp decays and precision measurements of the B⁰_s masses",
),
(
"Measurement of $B^+$, $B^0$ and $\\Lambda_b^0$ production in $p\\mkern 1mu\\mathrm{Pb}$ collisions at $\\sqrt{s_{NN}} = 8.16 \\ \\rm TeV$",
"Measurement of B⁺, B⁰ and Λ⁰_b production in p 1muPb collisions at √(s_NN) = 8.16 TeV",
),
(
"Measurement of the ratio of branching fractions of the decays $\\Lambda_b^0 \\!\\to \\psi(2S) \\Lambda$ and $\\Lambda_b^0 \\!\\to J\\!/\\!\\psi \\Lambda$",
"Measurement of the ratio of branching fractions of the decays Λ⁰_b → ψ(2S) Λ and Λ⁰_b → J/ψΛ",
),
(
"Measurement of the mass and production rate of $\\Xi_b^-$ baryons",
"Measurement of the mass and production rate of Ξ⁻_b baryons",
),
(
"Model-independent observation of exotic contributions to $B^0\\to J/\\psi K^+\\pi^-$ decays",
"Model-independent observation of exotic contributions to B⁰ → J/ψ K⁺π⁻ decays",
),
(
"Measurement of the branching fraction and $C\\!P$ asymmetry in $B^{+}\\rightarrow J/\\psi \\rho^{+}$ decays",
"Measurement of the branching fraction and CP asymmetry in B⁺ → J/ψρ⁺ decays",
),
(
"Search for the rare decay $B^{+} \\rightarrow \\mu^{+}\\mu^{-}\\mu^{+}\\nu_{\\mu}$",
"Search for the rare decay B⁺ → μ⁺μ⁻μ⁺ν_μ",
),
(
"Study of the $B^0\\to \\rho(770)^0 K^*(892)^0$ decay with an amplitude analysis of $B^0\\to (\\pi^+\\pi^-) (K^+\\pi^-)$ decays",
"Study of the B⁰ → ρ⁰(770) K*⁰(892) decay with an amplitude analysis of B⁰ → (π⁺π⁻) (K⁺π⁻) decays",
),
(
"Search for $CP$ violation through an amplitude analysis of $D^0\\rightarrow K^+ K^- \\pi^+ \\pi^-$ decays",
"Search for CP violation through an amplitude analysis of D⁰ → K⁺ K⁻ π⁺ π⁻ decays",
),
(
"First measurement of charm production in fixed-target configuration at the LHC",
"First measurement of charm production in fixed-target configuration at the LHC",
),
(
"Study of $\\Upsilon$ production in $p$Pb collisions at $\\sqrt{s_{NN}}=8.16$ TeV",
"Study of Υ production in pPb collisions at √(s_NN) = 8.16 TeV",
),
(
"Measurement of the charm-mixing parameter $y_{CP}$",
"Measurement of the charm-mixing parameter y_CP",
),
(
"Measurement of the branching fractions of the decays $D^+\\rightarrow K^-K^+K^+$, $D^+\\rightarrow \\pi^-\\pi^+K^+$ and $D^+_s\\rightarrow\\pi^-K^+K^+$",
"Measurement of the branching fractions of the decays D⁺ → K⁻K⁺K⁺, D⁺ → π⁻π⁺K⁺ and D⁺_s → π⁻K⁺K⁺",
),
(
"Observation of two resonances in the $\\Lambda_b^0 \\pi^\\pm$ systems and precise measurement of $\\Sigma_b^\\pm$ and $\\Sigma_b^{*\\pm}$ properties",
"Observation of two resonances in the Λ⁰_b π^± systems and precise measurement of Σ^±_b and Σ*^±_b properties",
),
# LHCb lhcb_conf_feed
(
"Prospects for searches for long-lived particles after the LHCb detector upgrades",
"Prospects for searches for long-lived particles after the LHCb detector upgrades",
),
(
"LHCb projections for proton-lead collisions during LHC Runs 3 and 4",
"LHCb projections for proton-lead collisions during LHC Runs 3 and 4",
),
(
"Measurement of $B^+$, $B^0$ and $\\Lambda⁰_b$ production and nuclear modification in $p$Pb collisions at $\\sqrt{s_\\mathrm{NN}}=8.16 ~~\\text {TeV}$",
"Measurement of B⁺, B⁰ and Λ⁰_b production and nuclear modification in pPb collisions at √(s_NN) = 8.16 TeV",
),
(
"Study of coherent $J/\\psi$ production in lead-lead collisions at $\\sqrt{s_{\\rm NN}} =5\\ \\rm{TeV}$ with the LHCb experiment",
"Study of coherent J/ψ production in lead-lead collisions at √(s_NN) = 5 TeV with the LHCb experiment",
),
(
"Update of the LHCb combination of the CKM angle $\\gamma$",
"Update of the LHCb combination of the CKM angle γ",
),
(
"Measurement of CP violation in the $B_s^0 \\to \\phi \\phi$ decay and search for the $B^0 \\to \\phi\\phi$ decay",
"Measurement of CP violation in the B⁰_s → ϕϕ decay and search for the B⁰ → ϕϕ decay",
),
(
"Prompt $\\Lambda^+_{\\mathrm{c}}$ production in $p\\mathrm{Pb}$ collisions at $\\sqrt{s_{_{\\mathrm{NN}}}} = 5.02\\mathrm{\\,Te\\kern -0.1em V}$",
"Prompt Λ⁺_c production in pPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Update of the LHCb combination of the CKM angle $\\gamma$ using $B\\to DK$ decays",
"Update of the LHCb combination of the CKM angle γ using B → DK decays",
),
(
"Measurement of antiproton production in $p$He collisions at $\\sqrt{s_{\\scriptscriptstyle\\rm NN}}=110$ GeV",
"Measurement of antiproton production in pHe collisions at √(s_NN) = 110 GeV",
),
(
"Measurement of $J/\\psi$ and $D^0$ production in $p$Ar collisions at $\\sqrt{s_{NN}}=110$ GeV",
"Measurement of J/ψ and D⁰ production in pAr collisions at √(s_NN) = 110 GeV",
),
(
"Measurement of time-dependent $C\\!P$-violating asymmetries in $B^0\\to\\pi^+\\pi^-$ and $B_s^0\\to K^+K^-$ decays at LHCb",
"Measurement of time-dependent CP-violating asymmetries in B⁰ → π⁺π⁻ and B⁰_s → K⁺K⁻ decays at LHCb",
),
(
"First observation of a baryonic $B_s^0$ decay",
"First observation of a baryonic B⁰_s decay",
),
(
"Measurement of $C\\!P$ asymmetry in $B_s^0\\to D_s^{\\mp}K^{\\pm}$ decays",
"Measurement of CP asymmetry in B⁰_s → D_s^∓K^± decays",
),
(
"Study of the decay $B^{\\pm} \\to DK^{*\\pm}$ with two-body $D$ decays",
"Study of the decay B^± → DK*^± with two-body D decays",
),
(
"Evidence for the rare decay $\\Sigma^+ \\to p \\mu^+ \\mu^-$",
"Evidence for the rare decay Σ⁺ → p μ⁺ μ⁻",
),
(
"Updated limit for the decay $K_{\\rm\\scriptscriptstyle S}^0\\rightarrow\\mu^+\\mu^-$",
"Updated limit for the decay K⁰_S → μ⁺μ⁻",
),
(
"Search for the rare decays $B^0_{(s)}\\to\\tau^+\\tau^-$",
"Search for the rare decays B⁰_s → τ⁺τ⁻",
),
(
"$CP$-violating asymmetries from the decay-time distribution of prompt $D^0 \\to K^+ K^-$ and $D^0 \\to \\pi^+\\pi^-$ decays in the full $\\mbox{LHCb}$ Run 1 data sample. Measurement using unbinned, acceptance corrected decay-time.",
"CP-violating asymmetries from the decay-time distribution of prompt D⁰ → K⁺ K⁻ and D⁰ → π⁺π⁻ decays in the full LHCb Run 1 data sample. Measurement using unbinned, acceptance corrected decay-time.",
),
(
"$CP$-violating asymmetries from the decay-time distribution of prompt $D^0 \\to K^+K^-$ and $D^0 \\to \\pi^+\\pi^-$ decays in the full LHCb Run~1 data sample. Measurement using yield asymmetries in bins of decay time.",
"CP-violating asymmetries from the decay-time distribution of prompt D⁰ → K⁺K⁻ and D⁰ → π⁺π⁻ decays in the full LHCb Run 1 data sample. Measurement using yield asymmetries in bins of decay time.",
),
(
"Dalitz plot analysis of the $D^+ \\rightarrow K^- K^+ K^+$ decay with the isobar model",
"Dalitz plot analysis of the D⁺ → K⁻ K⁺ K⁺ decay with the isobar model",
),
(
"Central exclusive production of $J/\\psi$ and $\\psi(2S)$ mesons in pp collisions at $\\sqrt{s}=13$ TeV",
"Central exclusive production of J/ψ and ψ(2S) mesons in pp collisions at √(s) = 13 TeV",
),
(
"Search for $H^0 \\rightarrow b \\bar{b}$ or $c \\bar{c}$ in association with a $W$ or $Z$ boson in the forward region of $pp$ collisions",
"Search for H⁰ → bb̅ or cc̅ in association with a W or Z boson in the forward region of pp collisions",
),
(
"LHCb dimuon and charm mass distributions",
"LHCb dimuon and charm mass distributions",
),
(
"Search for structure in the $B_s^0\\pi^\\pm$ invariant mass spectrum",
"Search for structure in the B⁰_s π^± invariant mass spectrum",
),
(
"Study of cold nuclear matter effects using prompt $D^0$ meson production in $p\\mathrm{Pb}$ collisions at LHCb",
"Study of cold nuclear matter effects using prompt D⁰ meson production in pPb collisions at LHCb",
),
# LHCb lhcb_paper_feed
(
"Search for $A' \\to \\mu^+ \\mu^-$ decays",
"Search for A' → μ⁺ μ⁻ decays",
),
(
"Search for the doubly charmed baryon $\\Xi_{cc}^{+}$",
"Search for the doubly charmed baryon Ξ⁺_cc",
),
(
"Amplitude analysis of the $B^+ \\to \\pi^+ \\pi^+ \\pi^-$ decay",
"Amplitude analysis of the B⁺ → π⁺ π⁺ π⁻ decay",
),
(
"Observation of several sources of $CP$ violation in $B^+ \\to \\pi^+ \\pi^+ \\pi^-$ decays",
"Observation of several sources of CP violation in B⁺ → π⁺ π⁺ π⁻ decays",
),
(
"Measurement of $\\psi(2S)$ production cross-sections in proton-proton collisions at $\\sqrt{s} = 7$ and 13 TeV",
"Measurement of ψ(2S) production cross-sections in proton-proton collisions at √(s) = 7 and 13 TeV",
),
(
"Measurement of CP violation in the $B_s^0\\rightarrow\\phi\\phi$ decay and search for the $B^0\\rightarrow\\phi\\phi$ decay",
"Measurement of CP violation in the B⁰_s → ϕϕ decay and search for the B⁰ → ϕϕ decay",
),
(
"Precision measurement of the $\\Lambda_c^+$, $\\Xi_c^+$ and $\\Xi_c^0$ baryon lifetimes",
"Precision measurement of the Λ⁺_c, Ξ⁺_c and Ξ⁰_c baryon lifetimes",
),
(
"Observation of the $\\Lambda_b^0\\rightarrow \\chi_{c1}(3872)pK^-$ decay",
"Observation of the Λ⁰_b → χ_c1(3872)pK⁻ decay",
),
(
"Updated measurement of time-dependent CP-violating observables in $B^0_s \\to J/\\psi K^+K^-$ decays",
"Updated measurement of time-dependent CP-violating observables in B⁰_s → J/ψ K⁺K⁻ decays",
),
(
"Measurement of $C\\!P$ observables in the process $B^0 \\to DK^{*0}$ with two- and four-body $D$ decays",
"Measurement of CP observables in the process B⁰ → DK*⁰ with two- and four-body D decays",
),
(
"Amplitude analysis of $B^\\pm \\to \\pi^\\pm K^+ K^-$ decays",
"Amplitude analysis of B^± → π^± K⁺ K⁻ decays",
),
(
"Search for the lepton-flavour-violating decays $B^{0}_{s}\\to\\tau^{\\pm}\\mu^{\\mp}$ and $B^{0}\\to\\tau^{\\pm}\\mu^{\\mp}$",
"Search for the lepton-flavour-violating decays B⁰_s → τ^±μ^∓ and B⁰ → τ^±μ^∓",
),
(
"Amplitude analysis of the $B^0_{(s)} \\to K^{*0} \\overline{K}^{*0}$ decays and measurement of the branching fraction of the $B^0 \\to K^{*0} \\overline{K}^{*0}$ decay",
"Amplitude analysis of the B⁰_s → K*⁰K*⁰ decays and measurement of the branching fraction of the B⁰ → K*⁰K*⁰ decay",
),
(
"Measurement of $CP$-violating and mixing-induced observables in $B_s^0 \\to \\phi\\gamma$ decays",
"Measurement of CP-violating and mixing-induced observables in B⁰_s → ϕγ decays",
),
(
"A search for $\\it{\\Xi}^{++}_{cc} \\rightarrow D^{+} p K^{-} \\pi^{+}$ decays",
"A search for Ξ⁺⁺_cc → D⁺ p K⁻π⁺ decays",
),
(
"Measurement of charged hadron production in $Z$-tagged jets in proton-proton collisions at $\\sqrt{s}=8$ TeV",
"Measurement of charged hadron production in Z-tagged jets in proton-proton collisions at √(s) = 8 TeV",
),
(
"Observation of a narrow pentaquark state, $P_c(4312)^+$, and of two-peak structure of the $P_c(4450)^+$",
"Observation of a narrow pentaquark state, P⁺_c(4312), and of two-peak structure of the P⁺_c(4450)",
),
(
"Measurements of $CP$ asymmetries in charmless four-body $\\Lambda^0_b$ and $\\Xi_b^0$ decays",
"Measurements of CP asymmetries in charmless four-body Λ⁰_b and Ξ⁰_b decays",
),
(
"Observation of an excited $B_c^+$ state",
"Observation of an excited B⁺_c state",
),
(
"Near-threshold $D\\bar{D}$ spectroscopy and observation of a new charmonium state",
"Near-threshold DD̅ spectroscopy and observation of a new charmonium state",
),
(
"Search for lepton-universality violation in $B^+\\to K^+\\ell^+\\ell^-$ decays",
"Search for lepton-universality violation in B⁺ → K⁺ℓ⁺ℓ⁻ decays",
),
(
"Observation of $C\\!P$ violation in charm decays",
"Observation of CP violation in charm decays",
),
(
"Measurement of the $CP$-violating phase $\\phi_s$ from $B_{s}^{0}\\to J/\\psi\\pi^+\\pi^-$ decays in 13 TeV $pp$ collisions",
"Measurement of the CP-violating phase ϕ_s from B⁰_s → J/ψπ⁺π⁻ decays in 13 TeV pp collisions",
),
(
"Measurement of the mass difference between neutral charm-meson eigenstates",
"Measurement of the mass difference between neutral charm-meson eigenstates",
),
(
"Search for $CP$ violation in $D^+_s\\to K_S^0\\pi^+$, $D^+\\to K_S^0K^+$ and $D^+\\to\\phi\\pi^+$ decays",
"Search for CP violation in D⁺_s → K⁰_S π⁺, D⁺ → K⁰_S K⁺ and D⁺ → ϕπ⁺ decays",
),
# LHCb lhcb_conf_feed
(
"Strong constraints on the $K^0_s \\to \\mu^+ \\mu^-$ branching fraction",
"Strong constraints on the K⁰_s → μ⁺ μ⁻ branching fraction",
),
(
"Search for time-dependent $CP$ violation in $D^0 \\to K^+ K^-$ and $D^0 \\to \\pi^+ \\pi^-$ decays",
"Search for time-dependent CP violation in D⁰ → K⁺ K⁻ and D⁰ → π⁺ π⁻ decays",
),
(
"Prospects for searches for long-lived particles after the LHCb detector upgrades",
"Prospects for searches for long-lived particles after the LHCb detector upgrades",
),
(
"LHCb projections for proton-lead collisions during LHC Runs 3 and 4",
"LHCb projections for proton-lead collisions during LHC Runs 3 and 4",
),
(
"Measurement of $B^+$, $B^0$ and $\\Lambda_b^0$ production and nuclear modification in $p$Pb collisions at $\\sqrt{s_\\mathrm{NN}}=8.16 ~~\\text {TeV}$",
"Measurement of B⁺, B⁰ and Λ⁰_b production and nuclear modification in pPb collisions at √(s_NN) = 8.16 TeV",
),
(
"Study of coherent $J/\\psi$ production in lead-lead collisions at $\\sqrt{s_{\\rm NN}} =5\\ \\rm{TeV}$ with the LHCb experiment",
"Study of coherent J/ψ production in lead-lead collisions at √(s_NN) = 5 TeV with the LHCb experiment",
),
(
"Update of the LHCb combination of the CKM angle $\\gamma$",
"Update of the LHCb combination of the CKM angle γ",
),
(
"Measurement of CP violation in the $B_s^0 \\to \\phi \\phi$ decay and search for the $B^0 \\to \\phi\\phi$ decay",
"Measurement of CP violation in the B⁰_s → ϕϕ decay and search for the B⁰ → ϕϕ decay",
),
(
"Prompt $\\Lambda^+_{\\mathrm{c}}$ production in $p\\mathrm{Pb}$ collisions at $\\sqrt{s_{_{\\mathrm{NN}}}} = 5.02\\mathrm{\\,Te\\kern -0.1em V}$",
"Prompt Λ⁺_c production in pPb collisions at √(s_NN) = 5.02 TeV",
),
(
"Update of the LHCb combination of the CKM angle $\\gamma$ using $B\\to DK$ decays",
"Update of the LHCb combination of the CKM angle γ using B → DK decays",
),
(
"Measurement of antiproton production in $p$He collisions at $\\sqrt{s_{\\scriptscriptstyle\\rm NN}}=110$ GeV",
"Measurement of antiproton production in pHe collisions at √(s_NN) = 110 GeV",
),
(
"Measurement of $J/\\psi$ and $D^0$ production in $p$Ar collisions at $\\sqrt{s_{NN}}=110$ GeV",
"Measurement of J/ψ and D⁰ production in pAr collisions at √(s_NN) = 110 GeV",
),
(
"Measurement of time-dependent $C\\!P$-violating asymmetries in $B^0\\to\\pi^+\\pi^-$ and $B_s^0\\to K^+K^-$ decays at LHCb",
"Measurement of time-dependent CP-violating asymmetries in B⁰ → π⁺π⁻ and B⁰_s → K⁺K⁻ decays at LHCb",
),
(
"First observation of a baryonic $B_s^0$ decay",
"First observation of a baryonic B⁰_s decay",
),
(
"Measurement of $C\\!P$ asymmetry in $B_s^0\\to D_s^{\\mp}K^{\\pm}$ decays",
"Measurement of CP asymmetry in B⁰_s → D_s^∓K^± decays",
),
(
"Study of the decay $B^{\\pm} \\to DK^{*\\pm}$ with two-body $D$ decays",
"Study of the decay B^± → DK*^± with two-body D decays",
),
(
"Evidence for the rare decay $\\Sigma^+ \\to p \\mu^+ \\mu^-$",
"Evidence for the rare decay Σ⁺ → p μ⁺ μ⁻",
),
(
"Updated limit for the decay $K_{\\rm\\scriptscriptstyle S}^0\\rightarrow\\mu^+\\mu^-$",
"Updated limit for the decay K⁰_S → μ⁺μ⁻",
),
(
"Search for the rare decays $B^0_{(s)}\\to\\tau^+\\tau^-$",
"Search for the rare decays B⁰_s → τ⁺τ⁻",
),
(
"$CP$-violating asymmetries from the decay-time distribution of prompt $D^0 \\to K^+ K^-$ and $D^0 \\to \\pi^+\\pi^-$ decays in the full $\\mbox{LHCb}$ Run 1 data sample. Measurement using unbinned, acceptance corrected decay-time.",
"CP-violating asymmetries from the decay-time distribution of prompt D⁰ → K⁺ K⁻ and D⁰ → π⁺π⁻ decays in the full LHCb Run 1 data sample. Measurement using unbinned, acceptance corrected decay-time.",
),
(
"$CP$-violating asymmetries from the decay-time distribution of prompt $D^0 \\to K^+K^-$ and $D^0 \\to \\pi^+\\pi^-$ decays in the full LHCb Run~1 data sample. Measurement using yield asymmetries in bins of decay time.",
"CP-violating asymmetries from the decay-time distribution of prompt D⁰ → K⁺K⁻ and D⁰ → π⁺π⁻ decays in the full LHCb Run 1 data sample. Measurement using yield asymmetries in bins of decay time.",
),
(
"Dalitz plot analysis of the $D^+ \\rightarrow K^- K^+ K^+$ decay with the isobar model",
"Dalitz plot analysis of the D⁺ → K⁻ K⁺ K⁺ decay with the isobar model",
),
(
"Central exclusive production of $J/\\psi$ and $\\psi(2S)$ mesons in pp collisions at $\\sqrt{s}=13$ TeV",
"Central exclusive production of J/ψ and ψ(2S) mesons in pp collisions at √(s) = 13 TeV",
),
(
"Search for $H^0 \\rightarrow b \\bar{b}$ or $c \\bar{c}$ in association with a $W$ or $Z$ boson in the forward region of $pp$ collisions",
"Search for H⁰ → bb̅ or cc̅ in association with a W or Z boson in the forward region of pp collisions",
),
],
)
def test_formatting(self, input_title, expected):
    """Check that ``format_title`` renders each LaTeX title as plain text.

    Parametrized over the (input_title, expected) pairs in the list above:
    *input_title* is the raw LaTeX title string and *expected* is the
    Unicode plain-text rendering ``format_title`` should produce.
    """
    new_title = cds_paper_bot.format_title(input_title)
    assert new_title == expected
| 72.020117
| 257
| 0.58672
| 15,198
| 110,983
| 4.29925
| 0.047638
| 0.070891
| 0.02213
| 0.036853
| 0.941613
| 0.925849
| 0.90326
| 0.880992
| 0.854683
| 0.811785
| 0
| 0.021373
| 0.315769
| 110,983
| 1,540
| 258
| 72.066883
| 0.831534
| 0.004253
| 0
| 0.407505
| 0
| 0.331797
| 0.758936
| 0.028358
| 0
| 0
| 0
| 0
| 0.000658
| 1
| 0.000658
| false
| 0
| 0.002633
| 0
| 0.00395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c194d6fcaa53ad0901bbd02c0843287e5286c3f9
| 5,805
|
py
|
Python
|
xbox/webapi/api/provider/screenshots/__init__.py
|
Landcross/xbox-webapi-python
|
0e3f01254907d929fc52f843a5f3bf53ef2ba876
|
[
"MIT"
] | 122
|
2018-03-17T05:20:35.000Z
|
2022-03-30T23:30:14.000Z
|
xbox/webapi/api/provider/screenshots/__init__.py
|
Landcross/xbox-webapi-python
|
0e3f01254907d929fc52f843a5f3bf53ef2ba876
|
[
"MIT"
] | 62
|
2018-03-27T14:17:11.000Z
|
2022-03-30T16:36:03.000Z
|
xbox/webapi/api/provider/screenshots/__init__.py
|
Landcross/xbox-webapi-python
|
0e3f01254907d929fc52f843a5f3bf53ef2ba876
|
[
"MIT"
] | 38
|
2018-05-09T19:17:48.000Z
|
2022-02-03T06:55:04.000Z
|
"""
Screenshots - Get screenshot info
"""
from xbox.webapi.api.provider.baseprovider import BaseProvider
from xbox.webapi.api.provider.screenshots.models import ScreenshotResponse
class ScreenshotsProvider(BaseProvider):
    """Query the Xbox Live screenshots-metadata service.

    Every endpoint shares the same host and contract-version header and
    returns a :class:`ScreenshotResponse`; the methods differ only in the
    URL path and query parameters.  The HTTP round trip is therefore
    centralized in :meth:`_fetch_screenshots`, and the optional
    ``/titles/{title_id}`` path segment in :meth:`_title_segment`.
    """

    SCREENSHOTS_METADATA_URL = "https://screenshotsmetadata.xboxlive.com"
    HEADERS_SCREENSHOTS_METADATA = {"x-xbl-contract-version": "5"}

    async def _fetch_screenshots(
        self, path: str, params: dict, **kwargs
    ) -> ScreenshotResponse:
        """GET *path* on the screenshots-metadata host and parse the body.

        Args:
            path: URL path (beginning with ``/``) below the service host
            params: Query-string parameters for the request

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response

        Raises:
            An HTTP error from ``raise_for_status`` on a non-success status.
        """
        resp = await self.client.session.get(
            self.SCREENSHOTS_METADATA_URL + path,
            params=params,
            headers=self.HEADERS_SCREENSHOTS_METADATA,
            **kwargs,
        )
        resp.raise_for_status()
        return ScreenshotResponse.parse_raw(await resp.text())

    @staticmethod
    def _title_segment(title_id) -> str:
        """Return ``/titles/{title_id}`` when a title filter is given, else ''."""
        return f"/titles/{title_id}" if title_id else ""

    async def get_recent_community_screenshots_by_title_id(
        self, title_id: str, **kwargs
    ) -> ScreenshotResponse:
        """Get recent community screenshots by Title Id.

        Args:
            title_id: Title Id to get screenshots for

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        return await self._fetch_screenshots(
            f"/public/titles/{title_id}/screenshots",
            {"qualifier": "created"},
            **kwargs,
        )

    async def get_recent_own_screenshots(
        self, title_id: str = None, skip_items: int = 0, max_items: int = 25, **kwargs
    ) -> ScreenshotResponse:
        """Get own recent screenshots, optionally filtered by title Id.

        Args:
            title_id: Optional Title ID to filter (``None`` means all titles;
                annotation kept as ``str`` for backward compatibility)
            skip_items: Item count to skip
            max_items: Maximum item count to load

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        path = "/users/me" + self._title_segment(title_id) + "/screenshots"
        return await self._fetch_screenshots(
            path, {"skipItems": skip_items, "maxItems": max_items}, **kwargs
        )

    async def get_recent_screenshots_by_xuid(
        self,
        xuid: str,
        title_id: str = None,
        skip_items: int = 0,
        max_items: int = 25,
        **kwargs,
    ) -> ScreenshotResponse:
        """Get recent screenshots by XUID, optionally filtered by title Id.

        Args:
            xuid: XUID of user to get screenshots from
            title_id: Optional title id filter
            skip_items: Item count to skip
            max_items: Maximum item count to load

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        path = f"/users/xuid({xuid})" + self._title_segment(title_id) + "/screenshots"
        return await self._fetch_screenshots(
            path, {"skipItems": skip_items, "maxItems": max_items}, **kwargs
        )

    async def get_saved_community_screenshots_by_title_id(
        self, title_id: str, **kwargs
    ) -> ScreenshotResponse:
        """Get saved community screenshots by Title Id.

        Args:
            title_id: Title Id to get screenshots for

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        return await self._fetch_screenshots(
            f"/public/titles/{title_id}/screenshots/saved",
            {"qualifier": "created"},
            **kwargs,
        )

    async def get_saved_own_screenshots(
        self, title_id: str = None, skip_items: int = 0, max_items: int = 25, **kwargs
    ) -> ScreenshotResponse:
        """Get own saved screenshots, optionally filtered by title Id.

        Args:
            title_id: Optional Title ID to filter
            skip_items: Item count to skip
            max_items: Maximum item count to load

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        path = "/users/me" + self._title_segment(title_id) + "/screenshots/saved"
        return await self._fetch_screenshots(
            path, {"skipItems": skip_items, "maxItems": max_items}, **kwargs
        )

    async def get_saved_screenshots_by_xuid(
        self,
        xuid: str,
        title_id: str = None,
        skip_items: int = 0,
        max_items: int = 25,
        **kwargs,
    ) -> ScreenshotResponse:
        """Get saved screenshots by XUID, optionally filtered by title Id.

        Args:
            xuid: XUID of user to get screenshots from
            title_id: Optional title id filter
            skip_items: Item count to skip
            max_items: Maximum item count to load

        Returns:
            :class:`ScreenshotResponse`: Screenshot Response
        """
        path = (
            f"/users/xuid({xuid})" + self._title_segment(title_id) + "/screenshots/saved"
        )
        return await self._fetch_screenshots(
            path, {"skipItems": skip_items, "maxItems": max_items}, **kwargs
        )
| 34.760479
| 91
| 0.618605
| 644
| 5,805
| 5.392857
| 0.128882
| 0.07256
| 0.025338
| 0.069105
| 0.915059
| 0.8759
| 0.856608
| 0.850849
| 0.845667
| 0.845667
| 0
| 0.003145
| 0.288028
| 5,805
| 166
| 92
| 34.96988
| 0.837164
| 0.005685
| 0
| 0.781609
| 0
| 0
| 0.116873
| 0.033645
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022989
| 0
| 0.126437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1bc1cd3407ed26127c070ac914737e0849d27ad
| 132
|
py
|
Python
|
mortgage_filter/__init__.py
|
lukavuko/mortgage-filter-package
|
187d771c441f93b6a5dd2c5bf67ee519d1888430
|
[
"MIT"
] | null | null | null |
mortgage_filter/__init__.py
|
lukavuko/mortgage-filter-package
|
187d771c441f93b6a5dd2c5bf67ee519d1888430
|
[
"MIT"
] | null | null | null |
mortgage_filter/__init__.py
|
lukavuko/mortgage-filter-package
|
187d771c441f93b6a5dd2c5bf67ee519d1888430
|
[
"MIT"
] | null | null | null |
from mortgage_filter.mortgage_filter import *
from mortgage_filter.mortgage_base import *
from mortgage_filter.exceptions import *
| 26.4
| 45
| 0.856061
| 17
| 132
| 6.352941
| 0.352941
| 0.518519
| 0.5
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098485
| 132
| 4
| 46
| 33
| 0.907563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
c1c846d0c0614ce5dff23f2e66a0dbba014a830e
| 24,535
|
py
|
Python
|
core/arxiv/submission/services/plaintext/tests.py
|
NeolithEra/arxiv-submission-core
|
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
|
[
"MIT"
] | null | null | null |
core/arxiv/submission/services/plaintext/tests.py
|
NeolithEra/arxiv-submission-core
|
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
|
[
"MIT"
] | null | null | null |
core/arxiv/submission/services/plaintext/tests.py
|
NeolithEra/arxiv-submission-core
|
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
|
[
"MIT"
] | null | null | null |
"""Tests for :mod:`arxiv.submission.services.plaintext`."""
from unittest import TestCase, mock
from arxiv.integration.api import exceptions, status
from . import plaintext
# Stand-in for the Flask application object: supplies the configuration that
# arxiv.integration.api.service reads via ``current_app`` when patched in below.
mock_app = mock.MagicMock(config={
    'PLAINTEXT_ENDPOINT': 'http://foohost:5432',
    'PLAINTEXT_VERIFY': False
})
class TestPlainTextService(TestCase):
    """Tests for :class:`.plaintext.PlainTextService`.

    Each test patches ``requests.Session`` so the service under test talks to
    a canned :class:`mock.MagicMock` response instead of the network.
    """

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_already_in_progress(self, mock_Session):
        """A plaintext extraction is already in progress."""
        # 303 See Other + a Location header is how the extractor reports an
        # extraction task that already exists for this source.
        mock_post = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.SEE_OTHER,
                json=mock.MagicMock(return_value={}),
                headers={'Location': '...'}
            )
        )
        mock_Session.return_value = mock.MagicMock(post=mock_post)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        with self.assertRaises(plaintext.ExtractionInProgress):
            service.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction(self, mock_Session):
        """Extraction is successfully requested."""
        # POST accepts the request (202); the follow-up GET reports that the
        # extraction is still in process.
        mock_session = mock.MagicMock(**{
            'post': mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.ACCEPTED,
                    json=mock.MagicMock(return_value={}),
                    content='',
                    headers={'Location': '/somewhere'}
                )
            ),
            'get': mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.OK,
                    json=mock.MagicMock(
                        return_value={'reason': 'extraction in process'}
                    ),
                    content="{'reason': 'fulltext extraction in process'}",
                    headers={}
                )
            )
        })
        mock_Session.return_value = mock_session
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        self.assertIsNone(service.request_extraction(source_id))
        self.assertEqual(
            mock_session.post.call_args[0][0],
            'http://foohost:8123/submission/132456'
        )

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction_bad_request(self, mock_Session):
        """Service returns 400 Bad Request."""
        mock_Session.return_value = mock.MagicMock(
            post=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.BAD_REQUEST,
                    json=mock.MagicMock(return_value={
                        'reason': 'something is not quite right'
                    })
                )
            )
        )
        source_id = '132456'
        # NOTE(review): constructed with (host, port) here vs a full URL in
        # other tests — presumably both constructor forms are supported.
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.BadRequest):
            service.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction_server_error(self, mock_Session):
        """Service returns 500 Internal Server Error."""
        mock_Session.return_value = mock.MagicMock(
            post=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.INTERNAL_SERVER_ERROR,
                    json=mock.MagicMock(return_value={
                        'reason': 'something is not quite right'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestFailed):
            service.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = mock.MagicMock(
            post=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.UNAUTHORIZED,
                    json=mock.MagicMock(return_value={
                        'reason': 'who are you'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestUnauthorized):
            service.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = mock.MagicMock(
            post=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.FORBIDDEN,
                    json=mock.MagicMock(return_value={
                        'reason': 'you do not have sufficient authz'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestForbidden):
            service.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_is_complete(self, mock_Session):
        """Extraction is indeed complete."""
        # A 303 from the status endpoint means the result is ready elsewhere.
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.SEE_OTHER,
                json=mock.MagicMock(return_value={}),
                headers={'Location': '...'}
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        self.assertTrue(service.extraction_is_complete(source_id))
        self.assertEqual(
            mock_get.call_args[0][0],
            'http://foohost:8123/submission/132456/status'
        )

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_in_progress(self, mock_Session):
        """Extraction is still in progress."""
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.OK,
                json=mock.MagicMock(return_value={'status': 'in_progress'})
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        self.assertFalse(service.extraction_is_complete(source_id))
        self.assertEqual(
            mock_get.call_args[0][0],
            'http://foohost:8123/submission/132456/status'
        )

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_failed(self, mock_Session):
        """Extraction failed."""
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.OK,
                json=mock.MagicMock(return_value={'status': 'failed'})
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        with self.assertRaises(plaintext.ExtractionFailed):
            self.assertFalse(service.extraction_is_complete(source_id))

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_complete_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = mock.MagicMock(
            get=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.UNAUTHORIZED,
                    json=mock.MagicMock(return_value={
                        'reason': 'who are you'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestUnauthorized):
            service.extraction_is_complete(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_complete_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = mock.MagicMock(
            get=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.FORBIDDEN,
                    json=mock.MagicMock(return_value={
                        'reason': 'you do not have sufficient authz'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestForbidden):
            service.extraction_is_complete(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = mock.MagicMock(
            get=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.UNAUTHORIZED,
                    json=mock.MagicMock(return_value={
                        'reason': 'who are you'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestUnauthorized):
            service.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = mock.MagicMock(
            get=mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.FORBIDDEN,
                    json=mock.MagicMock(return_value={
                        'reason': 'you do not have sufficient authz'
                    })
                )
            )
        )
        source_id = '132456'
        service = plaintext.PlainTextService('foohost', 8000)
        with self.assertRaises(exceptions.RequestForbidden):
            service.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve(self, mock_Session):
        """Retrieval is successful."""
        content = b'thisisthecontent'
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.OK,
                content=content
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        self.assertEqual(service.retrieve_content(source_id), content,
                         "Returns binary content as received")
        self.assertEqual(
            mock_get.call_args[0][0],
            'http://foohost:8123/submission/132456'
        )

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_nonexistant(self, mock_Session):
        """There is no such plaintext resource."""
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.NOT_FOUND,
                json=mock.MagicMock(return_value={'reason': 'no such thing'})
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        with self.assertRaises(exceptions.NotFound):
            service.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_in_progress(self, mock_Session):
        """There is no such plaintext resource."""
        # Retrieval while extraction is still running: 303 + Location raises
        # ExtractionInProgress rather than returning content.
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.SEE_OTHER,
                json=mock.MagicMock(return_value={}),
                headers={'Location': '...'}
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        service = plaintext.PlainTextService('http://foohost:8123')
        with self.assertRaises(plaintext.ExtractionInProgress):
            service.retrieve_content(source_id)
class TestPlainTextServiceModule(TestCase):
    """Tests for :mod:`.services.plaintext`.

    Unlike :class:`TestPlainTextService`, these call the service methods on
    the class itself and rely on the endpoint configured in ``mock_app``
    (``http://foohost:5432``).
    """

    def session(self, status_code=status.OK, method="get", json=None,
                content="", headers=None):
        """Make a mock session whose *method* returns a canned response.

        Args:
            status_code: HTTP status of the mocked response.
            method: Which session method ("get" or "post") to stub.
            json: Payload returned by ``response.json()``; defaults to ``{}``.
            content: Raw ``response.content``.
            headers: ``response.headers``; defaults to ``{}``.

        Returns:
            A :class:`mock.MagicMock` standing in for ``requests.Session``.
        """
        # Use None sentinels instead of mutable default arguments ({}), so
        # each call gets a fresh dict rather than a shared module-level one.
        json = {} if json is None else json
        headers = {} if headers is None else headers
        return mock.MagicMock(**{
            method: mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status_code,
                    json=mock.MagicMock(
                        return_value=json
                    ),
                    content=content,
                    headers=headers
                )
            )
        })

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_already_in_progress(self, mock_Session):
        """A plaintext extraction is already in progress."""
        mock_Session.return_value = self.session(
            status_code=status.SEE_OTHER,
            method='post',
            headers={'Location': '...'}
        )
        source_id = '132456'
        with self.assertRaises(plaintext.ExtractionInProgress):
            plaintext.PlainTextService.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction(self, mock_Session):
        """Extraction is successfully requested."""
        # Needs both verbs stubbed (POST to request, GET to poll), so the
        # single-method session() helper does not apply here.
        mock_session = mock.MagicMock(**{
            'post': mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.ACCEPTED,
                    json=mock.MagicMock(return_value={}),
                    content='',
                    headers={'Location': '/somewhere'}
                )
            ),
            'get': mock.MagicMock(
                return_value=mock.MagicMock(
                    status_code=status.OK,
                    json=mock.MagicMock(
                        return_value={'reason': 'extraction in process'}
                    ),
                    content="{'reason': 'fulltext extraction in process'}",
                    headers={}
                )
            )
        })
        mock_Session.return_value = mock_session
        source_id = '132456'
        self.assertIsNone(
            plaintext.PlainTextService.request_extraction(source_id)
        )
        self.assertEqual(mock_session.post.call_args[0][0],
                         'http://foohost:5432/submission/132456')

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_bad_request(self, mock_Session):
        """Service returns 400 Bad Request."""
        mock_Session.return_value = self.session(
            status_code=status.BAD_REQUEST,
            method='post',
            json={'reason': 'something is not quite right'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.BadRequest):
            plaintext.PlainTextService.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_server_error(self, mock_Session):
        """Service returns 500 Internal Server Error."""
        mock_Session.return_value = self.session(
            status_code=status.INTERNAL_SERVER_ERROR,
            method='post',
            json={'reason': 'something is not quite right'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestFailed):
            plaintext.PlainTextService.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = self.session(
            status_code=status.UNAUTHORIZED,
            method='post',
            json={'reason': 'who are you'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestUnauthorized):
            plaintext.PlainTextService.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_request_extraction_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = self.session(
            status_code=status.FORBIDDEN,
            method='post',
            json={'reason': 'you do not have sufficient authz'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestForbidden):
            plaintext.PlainTextService.request_extraction(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_is_complete(self, mock_Session):
        """Extraction is indeed complete."""
        mock_session = self.session(
            status_code=status.SEE_OTHER,
            headers={'Location': '...'}
        )
        mock_Session.return_value = mock_session
        source_id = '132456'
        self.assertTrue(plaintext.PlainTextService.extraction_is_complete(source_id))
        self.assertEqual(mock_session.get.call_args[0][0],
                         'http://foohost:5432/submission/132456/status')

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_in_progress(self, mock_Session):
        """Extraction is still in progress."""
        mock_session = self.session(
            json={'status': 'in_progress'}
        )
        mock_Session.return_value = mock_session
        source_id = '132456'
        self.assertFalse(plaintext.PlainTextService.extraction_is_complete(source_id))
        self.assertEqual(mock_session.get.call_args[0][0],
                         'http://foohost:5432/submission/132456/status')

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_extraction_failed(self, mock_Session):
        """Extraction failed."""
        mock_Session.return_value = self.session(json={'status': 'failed'})
        source_id = '132456'
        with self.assertRaises(plaintext.ExtractionFailed):
            self.assertFalse(plaintext.PlainTextService.extraction_is_complete(source_id))

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_complete_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = self.session(
            status_code=status.UNAUTHORIZED,
            json={'reason': 'who are you'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestUnauthorized):
            plaintext.PlainTextService.extraction_is_complete(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_complete_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = self.session(
            status_code=status.FORBIDDEN,
            json={'reason': 'you do not have sufficient authz'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestForbidden):
            plaintext.PlainTextService.extraction_is_complete(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_unauthorized(self, mock_Session):
        """Service returns 401 Unauthorized."""
        mock_Session.return_value = self.session(
            status_code=status.UNAUTHORIZED,
            json={'reason': 'who are you'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestUnauthorized):
            plaintext.PlainTextService.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_forbidden(self, mock_Session):
        """Service returns 403 Forbidden."""
        mock_Session.return_value = self.session(
            status_code=status.FORBIDDEN,
            json={'reason': 'you do not have sufficient authz'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.RequestForbidden):
            plaintext.PlainTextService.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve(self, mock_Session):
        """Retrieval is successful."""
        content = b'thisisthecontent'
        mock_get = mock.MagicMock(
            return_value=mock.MagicMock(
                status_code=status.OK,
                content=content
            )
        )
        mock_Session.return_value = mock.MagicMock(get=mock_get)
        source_id = '132456'
        self.assertEqual(
            plaintext.PlainTextService.retrieve_content(source_id),
            content,
            "Returns binary content as received"
        )
        self.assertEqual(mock_get.call_args[0][0],
                         'http://foohost:5432/submission/132456')

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_nonexistant(self, mock_Session):
        """There is no such plaintext resource."""
        mock_Session.return_value = self.session(
            status_code=status.NOT_FOUND,
            json={'reason': 'no such thing'}
        )
        source_id = '132456'
        with self.assertRaises(exceptions.NotFound):
            plaintext.PlainTextService.retrieve_content(source_id)

    @mock.patch('arxiv.integration.api.service.current_app', mock_app)
    @mock.patch('arxiv.integration.api.service.requests.Session')
    def test_retrieve_in_progress(self, mock_Session):
        """There is no such plaintext resource."""
        mock_Session.return_value = self.session(
            status_code=status.SEE_OTHER,
            headers={'Location': '...'}
        )
        source_id = '132456'
        with self.assertRaises(plaintext.ExtractionInProgress):
            plaintext.PlainTextService.retrieve_content(source_id)
| 42.743902
| 90
| 0.622009
| 2,477
| 24,535
| 5.967703
| 0.051272
| 0.071235
| 0.083548
| 0.10824
| 0.954945
| 0.943918
| 0.930794
| 0.922744
| 0.918685
| 0.906305
| 0
| 0.022483
| 0.267618
| 24,535
| 573
| 91
| 42.818499
| 0.800156
| 0.050377
| 0
| 0.780242
| 0
| 0
| 0.194827
| 0.120426
| 0
| 0
| 0
| 0
| 0.084677
| 1
| 0.066532
| false
| 0
| 0.006048
| 0
| 0.078629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a9af80f351b858eae02cbda26821ff7ba45d92d0
| 2,730
|
py
|
Python
|
data.py
|
xiyuanzh/ESC-GAN
|
1052e170a6414e20b4a7235b25c63df49361046e
|
[
"Apache-2.0"
] | 9
|
2022-01-27T19:31:53.000Z
|
2022-03-30T20:36:48.000Z
|
data.py
|
xiyuanzh/ESC-GAN
|
1052e170a6414e20b4a7235b25c63df49361046e
|
[
"Apache-2.0"
] | null | null | null |
data.py
|
xiyuanzh/ESC-GAN
|
1052e170a6414e20b4a7235b25c63df49361046e
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import Dataset
import xarray as xr
import numpy as np
import torch
class Prep(Dataset):
    """CMAP precipitation dataset served as fixed-length sequence chunks.

    Each item is a tuple ``(temp, valid_mask, test_mask)``:
      * ``temp``       -- z-normalized precipitation, NaNs replaced by 0
      * ``valid_mask`` -- 1 where the raw value was observed, 0 where NaN
      * ``test_mask``  -- grid mask loaded from ``./data/cmap_mask.npy``
    """

    def __init__(self, seq_len):
        """Load ``./data/CMAP.nc``, normalize, and split into ``seq_len`` chunks."""
        dset = xr.open_dataset("./data/CMAP.nc")
        temp_nc = dset.variables['precip'][:492]  # up to year 2019
        lat = len(temp_nc[0])
        lon = len(temp_nc[0][0])
        temp_arr = temp_nc.values
        # z_norm stores the statistics on self; the returned tuple is unused
        # here (the original assigned it to never-read locals).
        self.z_norm(temp_arr)
        temp_arr = (temp_arr - self.mean) / self.var
        # NaN != NaN, so this yields 1 for observed entries and 0 for gaps.
        self.valid_mask = (temp_arr == temp_arr) + 0
        temp_arr[temp_arr != temp_arr] = 0  # set NaN to zero
        # (time, lat, lon) -> (time, lat, lon, 1), then regroup into
        # (chunks, seq_len, lat, lon, 1); assumes time divisible by seq_len
        # (torch.stack would otherwise fail on a ragged last chunk).
        self.temp = torch.from_numpy(temp_arr).unsqueeze(3)
        self.temp = torch.stack(list(torch.split(self.temp, seq_len))).float()
        self.valid_mask = torch.stack(list(torch.split(torch.from_numpy(self.valid_mask), seq_len))).unsqueeze(4).float()
        test_mask = np.load('./data/cmap_mask.npy').reshape((lat, lon, 1))
        self.test_mask = torch.from_numpy(test_mask).repeat(len(self.temp), seq_len, 1, 1, 1).float()

    def __len__(self):
        """Number of ``seq_len``-long chunks."""
        return len(self.temp)

    def __getitem__(self, idx):
        """Return ``(temp, valid_mask, test_mask)`` for chunk ``idx``."""
        return self.temp[idx], self.valid_mask[idx], self.test_mask[idx]

    def z_norm(self, x):
        """Record NaN-aware statistics of ``x`` on ``self`` and return them.

        NOTE: despite the name, ``self.var`` holds the standard deviation
        (``np.nanstd``), which is what normalization divides by.
        """
        self.mean = np.nanmean(x)
        self.var = np.nanstd(x)
        return self.mean, self.var

    def de_z_norm(self, x):
        """Invert the :meth:`z_norm` scaling."""
        return x * self.var + self.mean
class Hadcrut(Dataset):
    """HadCRUT temperature-anomaly dataset served as fixed-length chunks.

    Structurally identical to :class:`Prep`, but reads
    ``./data/HadCRUT.nc`` (variable ``temperature_anomaly``) and the
    ``./data/hadcrut_mask.npy`` test mask.
    """

    def __init__(self, seq_len):
        """Load ``./data/HadCRUT.nc``, normalize, and split into ``seq_len`` chunks."""
        dset = xr.open_dataset("./data/HadCRUT.nc")
        temp_nc = dset.variables['temperature_anomaly'][:2040]  # up to year 2019
        lat = len(temp_nc[0])
        lon = len(temp_nc[0][0])
        temp_arr = temp_nc.values
        # z_norm stores the statistics on self; the returned tuple is unused
        # here (the original assigned it to never-read locals).
        self.z_norm(temp_arr)
        temp_arr = (temp_arr - self.mean) / self.var
        # NaN != NaN, so this yields 1 for observed entries and 0 for gaps.
        self.valid_mask = (temp_arr == temp_arr) + 0
        temp_arr[temp_arr != temp_arr] = 0  # set NaN to zero
        # (time, lat, lon) -> (time, lat, lon, 1), then regroup into
        # (chunks, seq_len, lat, lon, 1); assumes time divisible by seq_len.
        self.temp = torch.from_numpy(temp_arr).unsqueeze(3)
        self.temp = torch.stack(list(torch.split(self.temp, seq_len))).float()
        self.valid_mask = torch.stack(list(torch.split(torch.from_numpy(self.valid_mask), seq_len))).unsqueeze(4).float()
        test_mask = np.load('./data/hadcrut_mask.npy').reshape((lat, lon, 1))
        self.test_mask = torch.from_numpy(test_mask).repeat(len(self.temp), seq_len, 1, 1, 1).float()

    def __len__(self):
        """Number of ``seq_len``-long chunks."""
        return len(self.temp)

    def __getitem__(self, idx):
        """Return ``(temp, valid_mask, test_mask)`` for chunk ``idx``."""
        return self.temp[idx], self.valid_mask[idx], self.test_mask[idx]

    def z_norm(self, x):
        """Record NaN-aware statistics of ``x`` on ``self`` and return them.

        NOTE: despite the name, ``self.var`` holds the standard deviation
        (``np.nanstd``), which is what normalization divides by.
        """
        self.mean = np.nanmean(x)
        self.var = np.nanstd(x)
        return self.mean, self.var

    def de_z_norm(self, x):
        """Invert the :meth:`z_norm` scaling."""
        return x * self.var + self.mean
| 32.891566
| 121
| 0.624908
| 425
| 2,730
| 3.785882
| 0.162353
| 0.087011
| 0.082039
| 0.087011
| 0.908639
| 0.882536
| 0.882536
| 0.882536
| 0.882536
| 0.882536
| 0
| 0.017653
| 0.232234
| 2,730
| 83
| 122
| 32.891566
| 0.75
| 0.022344
| 0
| 0.793103
| 0
| 0
| 0.037134
| 0.008627
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.068966
| 0.103448
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.