hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79548df379377d03de3279aeb0242e2c8e121223 | 39,548 | py | Python | mac/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/servicemanagement/v1/servicemanagement_v1_client.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/servicemanagement/v1/servicemanagement_v1_client.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/servicemanagement/v1/servicemanagement_v1_client.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | """Generated client library for servicemanagement version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.servicemanagement.v1 import servicemanagement_v1_messages as messages
class ServicemanagementV1(base_api.BaseApiClient):
  """Generated client library for service servicemanagement version v1."""

  # Message classes used to build requests/parse responses for this API.
  MESSAGES_MODULE = messages
  BASE_URL = u'https://servicemanagement.googleapis.com/'

  _PACKAGE = u'servicemanagement'
  # OAuth scopes this client may request.
  _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/service.management', u'https://www.googleapis.com/auth/service.management.readonly']
  _VERSION = u'v1'
  # NOTE(review): these are the shared installed-app OAuth credentials the
  # generator bakes into every apitools client; they are public identifiers,
  # not per-user secrets.
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  # NOTE(review): user agent mirroring the client-secret string looks like a
  # copy-paste, but it matches the generator output for sibling clients —
  # left as-is deliberately.
  _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _CLIENT_CLASS_NAME = u'ServicemanagementV1'
  _URL_VERSION = u'v1'
  _API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None, response_encoding=None):
    """Create a new servicemanagement handle.

    All arguments are forwarded to base_api.BaseApiClient; `url` falls back
    to BASE_URL when empty.
    """
    url = url or self.BASE_URL
    super(ServicemanagementV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # One service wrapper per API resource collection exposed by this API.
    self.operations = self.OperationsService(self)
    self.services_accessPolicy = self.ServicesAccessPolicyService(self)
    self.services_configs = self.ServicesConfigsService(self)
    self.services_consumers = self.ServicesConsumersService(self)
    self.services_customerSettings = self.ServicesCustomerSettingsService(self)
    self.services_projectSettings = self.ServicesProjectSettingsService(self)
    self.services_rollouts = self.ServicesRolloutsService(self)
    self.services = self.ServicesService(self)
  class OperationsService(base_api.BaseApiService):
    """Service class for the operations resource."""

    _NAME = u'operations'

    def __init__(self, client):
      super(ServicemanagementV1.OperationsService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Get(self, request, global_params=None):
      r"""Gets the latest state of a long-running operation. Clients can use this.

      method to poll the operation result at intervals as recommended by the API
      service.

      Args:
        request: (ServicemanagementOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.operations.get',
        ordered_params=[u'operationsId'],
        path_params=[u'operationsId'],
        query_params=[],
        relative_path=u'v1/operations/{operationsId}',
        request_field='',
        request_type_name=u'ServicemanagementOperationsGetRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists service operations that match the specified filter in the request.

      Args:
        request: (ServicemanagementOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListOperationsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.operations.list',
        ordered_params=[],
        path_params=[],
        query_params=[u'filter', u'name', u'pageSize', u'pageToken'],
        relative_path=u'v1/operations',
        request_field='',
        request_type_name=u'ServicemanagementOperationsListRequest',
        response_type_name=u'ListOperationsResponse',
        supports_download=False,
    )
  class ServicesAccessPolicyService(base_api.BaseApiService):
    """Service class for the services_accessPolicy resource."""

    _NAME = u'services_accessPolicy'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesAccessPolicyService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Query(self, request, global_params=None):
      r"""Method to query the accessibility of a service and any associated.

      visibility labels for a specified user.

      Members of the producer project may call this method and specify any user.

      Any user may call this method, but must specify their own email address.
      In this case the method will return NOT_FOUND if the user has no access to
      the service.

      Args:
        request: (ServicemanagementServicesAccessPolicyQueryRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (QueryUserAccessResponse) The response message.
      """
      config = self.GetMethodConfig('Query')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Query.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.accessPolicy.query',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'userEmail'],
        relative_path=u'v1/services/{serviceName}/accessPolicy:query',
        request_field='',
        request_type_name=u'ServicemanagementServicesAccessPolicyQueryRequest',
        response_type_name=u'QueryUserAccessResponse',
        supports_download=False,
    )
  class ServicesConfigsService(base_api.BaseApiService):
    """Service class for the services_configs resource."""

    _NAME = u'services_configs'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesConfigsService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Create(self, request, global_params=None):
      r"""Creates a new service configuration (version) for a managed service.

      This method only stores the service configuration. To roll out the service
      configuration to backend systems please call
      CreateServiceRollout.

      Only the 100 most recent service configurations and ones referenced by
      existing rollouts are kept for each service. The rest will be deleted
      eventually.

      Args:
        request: (ServicemanagementServicesConfigsCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Service) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.configs.create',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}/configs',
        request_field=u'service',
        request_type_name=u'ServicemanagementServicesConfigsCreateRequest',
        response_type_name=u'Service',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      r"""Gets a service configuration (version) for a managed service.

      Args:
        request: (ServicemanagementServicesConfigsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Service) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.configs.get',
        ordered_params=[u'serviceName', u'configId'],
        path_params=[u'configId', u'serviceName'],
        query_params=[u'view'],
        relative_path=u'v1/services/{serviceName}/configs/{configId}',
        request_field='',
        request_type_name=u'ServicemanagementServicesConfigsGetRequest',
        response_type_name=u'Service',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists the history of the service configuration for a managed service,.

      from the newest to the oldest.

      Args:
        request: (ServicemanagementServicesConfigsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListServiceConfigsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.configs.list',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'pageSize', u'pageToken'],
        relative_path=u'v1/services/{serviceName}/configs',
        request_field='',
        request_type_name=u'ServicemanagementServicesConfigsListRequest',
        response_type_name=u'ListServiceConfigsResponse',
        supports_download=False,
    )

    def Submit(self, request, global_params=None):
      r"""Creates a new service configuration (version) for a managed service based.

      on
      user-supplied configuration source files (for example: OpenAPI
      Specification). This method stores the source configurations as well as the
      generated service configuration. To rollout the service configuration to
      other services,
      please call CreateServiceRollout.

      Only the 100 most recent configuration sources and ones referenced by
      existing service configurations are kept for each service. The rest will be
      deleted eventually.

      Operation<response: SubmitConfigSourceResponse>

      Args:
        request: (ServicemanagementServicesConfigsSubmitRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Submit')
      return self._RunMethod(
          config, request, global_params=global_params)

    Submit.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.configs.submit',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}/configs:submit',
        request_field=u'submitConfigSourceRequest',
        request_type_name=u'ServicemanagementServicesConfigsSubmitRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
  class ServicesConsumersService(base_api.BaseApiService):
    """Service class for the services_consumers resource."""

    _NAME = u'services_consumers'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesConsumersService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def GetIamPolicy(self, request, global_params=None):
      r"""Gets the access control policy for a resource.

      Returns an empty policy if the resource exists and does not have a policy
      set.

      Args:
        request: (ServicemanagementServicesConsumersGetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('GetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.consumers.getIamPolicy',
        ordered_params=[u'servicesId', u'consumersId'],
        path_params=[u'consumersId', u'servicesId'],
        query_params=[],
        relative_path=u'v1/services/{servicesId}/consumers/{consumersId}:getIamPolicy',
        request_field=u'getIamPolicyRequest',
        request_type_name=u'ServicemanagementServicesConsumersGetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )

    def SetIamPolicy(self, request, global_params=None):
      r"""Sets the access control policy on the specified resource. Replaces any.

      existing policy.

      Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED

      Args:
        request: (ServicemanagementServicesConsumersSetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('SetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)

    SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.consumers.setIamPolicy',
        ordered_params=[u'servicesId', u'consumersId'],
        path_params=[u'consumersId', u'servicesId'],
        query_params=[],
        relative_path=u'v1/services/{servicesId}/consumers/{consumersId}:setIamPolicy',
        request_field=u'setIamPolicyRequest',
        request_type_name=u'ServicemanagementServicesConsumersSetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )

    def TestIamPermissions(self, request, global_params=None):
      r"""Returns permissions that a caller has on the specified resource.

      If the resource does not exist, this will return an empty set of
      permissions, not a NOT_FOUND error.

      Note: This operation is designed to be used for building permission-aware
      UIs and command-line tools, not for authorization checking. This operation
      may "fail open" without warning.

      Args:
        request: (ServicemanagementServicesConsumersTestIamPermissionsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TestIamPermissionsResponse) The response message.
      """
      config = self.GetMethodConfig('TestIamPermissions')
      return self._RunMethod(
          config, request, global_params=global_params)

    TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.consumers.testIamPermissions',
        ordered_params=[u'servicesId', u'consumersId'],
        path_params=[u'consumersId', u'servicesId'],
        query_params=[],
        relative_path=u'v1/services/{servicesId}/consumers/{consumersId}:testIamPermissions',
        request_field=u'testIamPermissionsRequest',
        request_type_name=u'ServicemanagementServicesConsumersTestIamPermissionsRequest',
        response_type_name=u'TestIamPermissionsResponse',
        supports_download=False,
    )
  class ServicesCustomerSettingsService(base_api.BaseApiService):
    """Service class for the services_customerSettings resource."""

    _NAME = u'services_customerSettings'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesCustomerSettingsService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Get(self, request, global_params=None):
      r"""Retrieves the settings that control the specified customer's usage of the.

      service.

      Args:
        request: (ServicemanagementServicesCustomerSettingsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (CustomerSettings) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.customerSettings.get',
        ordered_params=[u'serviceName', u'customerId'],
        path_params=[u'customerId', u'serviceName'],
        query_params=[u'expand', u'view'],
        relative_path=u'v1/services/{serviceName}/customerSettings/{customerId}',
        request_field='',
        request_type_name=u'ServicemanagementServicesCustomerSettingsGetRequest',
        response_type_name=u'CustomerSettings',
        supports_download=False,
    )
  class ServicesProjectSettingsService(base_api.BaseApiService):
    """Service class for the services_projectSettings resource."""

    _NAME = u'services_projectSettings'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesProjectSettingsService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Get(self, request, global_params=None):
      r"""Retrieves the settings that control the specified consumer project's usage.

      of the service.

      Args:
        request: (ServicemanagementServicesProjectSettingsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ProjectSettings) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.projectSettings.get',
        ordered_params=[u'serviceName', u'consumerProjectId'],
        path_params=[u'consumerProjectId', u'serviceName'],
        query_params=[u'expand', u'view'],
        relative_path=u'v1/services/{serviceName}/projectSettings/{consumerProjectId}',
        request_field='',
        request_type_name=u'ServicemanagementServicesProjectSettingsGetRequest',
        response_type_name=u'ProjectSettings',
        supports_download=False,
    )

    def Patch(self, request, global_params=None):
      r"""Updates specified subset of the settings that control the specified.

      consumer project's usage of the service. Attempts to update a field not
      controlled by the caller will result in an access denied error.

      Operation<response: ProjectSettings>

      The metadata field of the Operation will be a CompositeOperationMetadata
      object.

      Args:
        request: (ServicemanagementServicesProjectSettingsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    Patch.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'PATCH',
        method_id=u'servicemanagement.services.projectSettings.patch',
        ordered_params=[u'serviceName', u'consumerProjectId'],
        path_params=[u'consumerProjectId', u'serviceName'],
        query_params=[u'excludeFinalQuotaSettingsInResponse', u'updateMask'],
        relative_path=u'v1/services/{serviceName}/projectSettings/{consumerProjectId}',
        request_field=u'projectSettings',
        request_type_name=u'ServicemanagementServicesProjectSettingsPatchRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
  class ServicesRolloutsService(base_api.BaseApiService):
    """Service class for the services_rollouts resource."""

    _NAME = u'services_rollouts'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesRolloutsService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }

    def Create(self, request, global_params=None):
      r"""Creates a new service configuration rollout. Based on rollout, the.

      Google Service Management will roll out the service configurations to
      different backend services. For example, the logging configuration will be
      pushed to Google Cloud Logging.

      Please note that any previous pending and running Rollouts and associated
      Operations will be automatically cancelled so that the latest Rollout will
      not be blocked by previous Rollouts.

      Only the 100 most recent (in any state) and the last 10 successful (if not
      already part of the set of 100 most recent) rollouts are kept for each
      service. The rest will be deleted eventually.

      Operation<response: Rollout>

      Args:
        request: (ServicemanagementServicesRolloutsCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.rollouts.create',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'force'],
        relative_path=u'v1/services/{serviceName}/rollouts',
        request_field=u'rollout',
        request_type_name=u'ServicemanagementServicesRolloutsCreateRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      r"""Gets a service configuration rollout.

      Args:
        request: (ServicemanagementServicesRolloutsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Rollout) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.rollouts.get',
        ordered_params=[u'serviceName', u'rolloutId'],
        path_params=[u'rolloutId', u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}/rollouts/{rolloutId}',
        request_field='',
        request_type_name=u'ServicemanagementServicesRolloutsGetRequest',
        response_type_name=u'Rollout',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      r"""Lists the history of the service configuration rollouts for a managed.

      service, from the newest to the oldest.

      Args:
        request: (ServicemanagementServicesRolloutsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListServiceRolloutsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.rollouts.list',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'filter', u'pageSize', u'pageToken'],
        relative_path=u'v1/services/{serviceName}/rollouts',
        request_field='',
        request_type_name=u'ServicemanagementServicesRolloutsListRequest',
        response_type_name=u'ListServiceRolloutsResponse',
        supports_download=False,
    )
  class ServicesService(base_api.BaseApiService):
    """Service class for the services resource."""

    _NAME = u'services'

    def __init__(self, client):
      super(ServicemanagementV1.ServicesService, self).__init__(client)
      # No resumable-upload configuration for this resource.
      self._upload_configs = {
          }
    def Create(self, request, global_params=None):
      r"""Creates a new managed service.

      Please note one producer project can own no more than 20 services.

      Operation<response: ManagedService>

      Args:
        request: (ManagedService) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    # '<request>' means the request message itself is the HTTP request body.
    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.create',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path=u'v1/services',
        request_field='<request>',
        request_type_name=u'ManagedService',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def Delete(self, request, global_params=None):
      r"""Deletes a managed service. This method will change the service to the.

      `Soft-Delete` state for 30 days. Within this period, service producers may
      call UndeleteService to restore the service.
      After 30 days, the service will be permanently deleted.

      Operation<response: google.protobuf.Empty>

      Args:
        request: (ServicemanagementServicesDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Deferred via lambda so the ApiMethodInfo is built only when needed.
    Delete.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'DELETE',
        method_id=u'servicemanagement.services.delete',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}',
        request_field='',
        request_type_name=u'ServicemanagementServicesDeleteRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def Disable(self, request, global_params=None):
      r"""Disables a service for a project, so it can no longer be.

      be used for the project. It prevents accidental usage that may cause
      unexpected billing charges or security leaks.

      Operation<response: DisableServiceResponse>

      Args:
        request: (ServicemanagementServicesDisableRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Disable')
      return self._RunMethod(
          config, request, global_params=global_params)

    Disable.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.disable',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}:disable',
        request_field=u'disableServiceRequest',
        request_type_name=u'ServicemanagementServicesDisableRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def Enable(self, request, global_params=None):
      r"""Enables a service for a project, so it can be used.

      for the project. See
      [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for
      more information.

      Operation<response: EnableServiceResponse>

      Args:
        request: (ServicemanagementServicesEnableRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Enable')
      return self._RunMethod(
          config, request, global_params=global_params)

    Enable.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.enable',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[],
        relative_path=u'v1/services/{serviceName}:enable',
        request_field=u'enableServiceRequest',
        request_type_name=u'ServicemanagementServicesEnableRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def GenerateConfigReport(self, request, global_params=None):
      r"""Generates and returns a report (errors, warnings and changes from.

      existing configurations) associated with
      GenerateConfigReportRequest.new_value

      If GenerateConfigReportRequest.old_value is specified,
      GenerateConfigReportRequest will contain a single ChangeReport based on the
      comparison between GenerateConfigReportRequest.new_value and
      GenerateConfigReportRequest.old_value.
      If GenerateConfigReportRequest.old_value is not specified, this method
      will compare GenerateConfigReportRequest.new_value with the last pushed
      service configuration.

      Args:
        request: (GenerateConfigReportRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GenerateConfigReportResponse) The response message.
      """
      config = self.GetMethodConfig('GenerateConfigReport')
      return self._RunMethod(
          config, request, global_params=global_params)

    # '<request>' means the request message itself is the HTTP request body.
    GenerateConfigReport.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.generateConfigReport',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path=u'v1/services:generateConfigReport',
        request_field='<request>',
        request_type_name=u'GenerateConfigReportRequest',
        response_type_name=u'GenerateConfigReportResponse',
        supports_download=False,
    )
    def Get(self, request, global_params=None):
      r"""Gets a managed service. Authentication is required unless the service is.

      public.

      Args:
        request: (ServicemanagementServicesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ManagedService) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.get',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'consumerProjectId', u'expand', u'view'],
        relative_path=u'v1/services/{serviceName}',
        request_field='',
        request_type_name=u'ServicemanagementServicesGetRequest',
        response_type_name=u'ManagedService',
        supports_download=False,
    )
    def GetConfig(self, request, global_params=None):
      r"""Gets a service configuration (version) for a managed service.

      Args:
        request: (ServicemanagementServicesGetConfigRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Service) The response message.
      """
      config = self.GetMethodConfig('GetConfig')
      return self._RunMethod(
          config, request, global_params=global_params)

    GetConfig.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.getConfig',
        ordered_params=[u'serviceName'],
        path_params=[u'serviceName'],
        query_params=[u'configId', u'view'],
        relative_path=u'v1/services/{serviceName}/config',
        request_field='',
        request_type_name=u'ServicemanagementServicesGetConfigRequest',
        response_type_name=u'Service',
        supports_download=False,
    )
    def GetIamPolicy(self, request, global_params=None):
      r"""Gets the access control policy for a resource.

      Returns an empty policy if the resource exists and does not have a policy
      set.

      Args:
        request: (ServicemanagementServicesGetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('GetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)

    GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'servicemanagement.services.getIamPolicy',
        ordered_params=[u'servicesId'],
        path_params=[u'servicesId'],
        query_params=[],
        relative_path=u'v1/services/{servicesId}:getIamPolicy',
        request_field=u'getIamPolicyRequest',
        request_type_name=u'ServicemanagementServicesGetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )
    def List(self, request, global_params=None):
      r"""Lists managed services.

      Returns all public services. For authenticated users, also returns all
      services the calling user has "servicemanagement.services.get" permission
      for.

      **BETA:** If the caller specifies the `consumer_id`, it returns only the
      services enabled on the consumer. The `consumer_id` must have the format
      of "project:{PROJECT-ID}".

      Args:
        request: (ServicemanagementServicesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListServicesResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'servicemanagement.services.list',
        ordered_params=[],
        path_params=[],
        query_params=[u'category', u'consumerId', u'consumerProjectId', u'pageSize', u'pageToken', u'producerProjectId'],
        relative_path=u'v1/services',
        request_field='',
        request_type_name=u'ServicemanagementServicesListRequest',
        response_type_name=u'ListServicesResponse',
        supports_download=False,
    )
def Patch(self, request, global_params=None):
    r"""Updates one of the specified service's configurations.

    The patch operation fails if the named service does not exist.

    Operation<response: ManagedService>

    Args:
      request: (ServicemanagementServicesPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Resolve the declarative method description, then delegate the HTTP
    # round trip to the shared apitools base-client machinery.
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)

Patch.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'PATCH',
    method_id=u'servicemanagement.services.patch',
    ordered_params=[u'serviceName'],
    path_params=[u'serviceName'],
    query_params=[u'updateMask'],
    relative_path=u'v1/services/{serviceName}',
    request_field=u'managedService',
    request_type_name=u'ServicemanagementServicesPatchRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
    r"""Sets the access control policy on the specified resource.

    Any existing policy on the resource is replaced.

    Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED

    Args:
      request: (ServicemanagementServicesSetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Policy) The response message.
    """
    # Resolve the declarative method description, then delegate the HTTP
    # round trip to the shared apitools base-client machinery.
    return self._RunMethod(
        self.GetMethodConfig('SetIamPolicy'), request,
        global_params=global_params)

SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'servicemanagement.services.setIamPolicy',
    ordered_params=[u'servicesId'],
    path_params=[u'servicesId'],
    query_params=[],
    relative_path=u'v1/services/{servicesId}:setIamPolicy',
    request_field=u'setIamPolicyRequest',
    request_type_name=u'ServicemanagementServicesSetIamPolicyRequest',
    response_type_name=u'Policy',
    supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
    r"""Returns permissions that a caller has on the specified resource.

    If the resource does not exist, this will return an empty set of
    permissions, not a NOT_FOUND error.

    Note: This operation is designed to be used for building permission-aware
    UIs and command-line tools, not for authorization checking. This operation
    may "fail open" without warning.

    Args:
      request: (ServicemanagementServicesTestIamPermissionsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TestIamPermissionsResponse) The response message.
    """
    # Resolve the declarative method description, then delegate the HTTP
    # round trip to the shared apitools base-client machinery.
    return self._RunMethod(
        self.GetMethodConfig('TestIamPermissions'), request,
        global_params=global_params)

TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'servicemanagement.services.testIamPermissions',
    ordered_params=[u'servicesId'],
    path_params=[u'servicesId'],
    query_params=[],
    relative_path=u'v1/services/{servicesId}:testIamPermissions',
    request_field=u'testIamPermissionsRequest',
    request_type_name=u'ServicemanagementServicesTestIamPermissionsRequest',
    response_type_name=u'TestIamPermissionsResponse',
    supports_download=False,
)
def Undelete(self, request, global_params=None):
    r"""Revives a previously deleted managed service.

    The method restores the service using the configuration at the time the
    service was deleted. The target service must exist and must have been
    deleted within the last 30 days.

    Operation<response: UndeleteServiceResponse>

    Args:
      request: (ServicemanagementServicesUndeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Resolve the declarative method description, then delegate the HTTP
    # round trip to the shared apitools base-client machinery.
    return self._RunMethod(
        self.GetMethodConfig('Undelete'), request,
        global_params=global_params)

Undelete.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'servicemanagement.services.undelete',
    ordered_params=[u'serviceName'],
    path_params=[u'serviceName'],
    query_params=[],
    relative_path=u'v1/services/{serviceName}:undelete',
    request_field='',
    request_type_name=u'ServicemanagementServicesUndeleteRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
| 39.627255 | 243 | 0.714044 |
79548eff5a738a873b6f5ba4d47136432e97fde1 | 5,202 | py | Python | pytorch_ares/test/test_fast_adversarial.py | thu-ml/realsafe | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 107 | 2020-06-15T09:55:11.000Z | 2020-12-20T11:27:11.000Z | pytorch_ares/test/test_fast_adversarial.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 7 | 2020-06-14T03:00:18.000Z | 2020-12-07T07:10:10.000Z | pytorch_ares/test/test_fast_adversarial.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 19 | 2020-06-14T08:35:33.000Z | 2020-12-19T13:43:41.000Z | import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import torch
import numpy as np
from pytorch_ares.dataset_torch.cifar_dataset import cifar10
from third_party.attack_cifar import *
from third_party.autoattack.autoattack import AutoAttack
from pytorch_ares.cifar10_model.utils import load_model_from_path
if __name__ == '__main__':
    import argparse

    # Evaluate a Fast-AT CIFAR-10 model against PGD and AutoAttack, logging
    # clean and robust accuracy to a CSV file.
    # (fix) the parser was constructed twice back-to-back; one instance is enough.
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=str, default="0", help="Comma separated list of GPU ids")
    parser.add_argument('--result_path', default=os.path.join(os.path.dirname(os.path.abspath(os.path.dirname(__file__))),'test_out'), help='result path for cifar10')
    # dataset
    parser.add_argument('--batchsize', default=10, help='batchsize for this model')
    parser.add_argument('--cifar10_path', default=os.path.join(os.path.dirname(os.path.abspath(os.path.dirname(__file__))),'data/CIFAR10'), help='cifar10_path for this model')
    parser.add_argument('--norm', default=np.inf, help='You can choose np.inf and 2(l2), l2 support all methods and linf dont support apgd and deepfool', choices=[np.inf, 2])
    parser.add_argument('--eps', type=float, default=8/255.0, help='linf: 8/255.0 and l2: 3.0')
    parser.add_argument('--stepsize', type=float, default=8/24000.0, help='linf: eps/steps and l2: (2.5*eps)/steps that is 0.075')
    parser.add_argument('--steps', type=int, default=100, help='linf: 100 and l2: 100, steps is set to 100 if attack is apgd')
    parser.add_argument('--n_queries', default=5000, help='n_queries for square')
    parser.add_argument('--version', default='rand', help='version for autoattack', choices=['standard', 'plus', 'rand'])
    parser.add_argument('--target', default=False, help='target for attack', choices=[True, False])
    args = parser.parse_args()

    gpu_list = [int(i) for i in args.gpu.strip().split(",")]
    device = torch.device(f"cuda:{gpu_list[0]}" if torch.cuda.is_available() else "cpu")

    # Load the Fast-AT model definition from its source file and instantiate it.
    model_path = os.path.join(os.path.dirname(os.path.abspath(os.path.dirname(__file__))), 'pytorch_ares/cifar10_model/fast_at.py')
    rs_model = load_model_from_path(model_path)
    net = rs_model.load(device)

    test_loader = cifar10(args.batchsize, args.cifar10_path)

    # Running totals of correctly classified samples: clean / PGD / AutoAttack.
    # (fix) dropped `test_num_apgd` and `test_num_square`, which were never
    # updated nor read anywhere in this script.
    test_num = 0
    test_num_pgd = 0
    test_num_aa = 0

    with open(os.path.join(args.result_path, "{}.csv".format("awp")), "a") as f:
        for i, (image, labels) in enumerate(test_loader, 1):
            batchsize = image.shape[0]
            image, labels = image.to(device), labels.to(device)
            out = net(image)
            out = torch.argmax(out, dim=1)
            acc = (out == labels)
            test_num += (out == labels).sum()

            # Only attack samples the model already classifies correctly.
            ind_to_fool = acc.nonzero().squeeze()
            if len(ind_to_fool.shape) == 0:
                ind_to_fool = ind_to_fool.unsqueeze(0)
            if ind_to_fool.numel() != 0:
                x_to_fool, y_to_fool = image[ind_to_fool].clone(), labels[ind_to_fool].clone()
                Pgd = PGD(net, epsilon=args.eps, norm=args.norm, stepsize=args.stepsize, steps=args.steps, target=args.target, device=device)
                autoattck = AutoAttack(net, norm="Linf", steps=args.steps, query=args.n_queries, eps=args.eps, version=args.version, device=device)
                adv_pgd = Pgd.forward(x_to_fool, y_to_fool)
                adv_autoattack = autoattck.run_standard_evaluation(x_to_fool, y_to_fool, bs=len(y_to_fool))
                # attack output: count samples that remain correctly classified.
                out_adv_pgd = net(adv_pgd)
                out_adv_autoattack = net(adv_autoattack)
                out_adv_pgd = torch.argmax(out_adv_pgd, dim=1)
                out_adv_autoattack = torch.argmax(out_adv_autoattack, dim=1)
                test_num_pgd += (out_adv_pgd == y_to_fool).sum()
                test_num_aa += (out_adv_autoattack == y_to_fool).sum()

            # Periodic progress report.
            if i % 50 == 0:
                num = i * batchsize
                test_acc = test_num.item() / num
                test_acc_pgd = test_num_pgd.item() / num
                test_acc_aa = test_num_aa.item() / num
                print("epoch: %d clean_acc: %.2f %%" % (i, test_acc * 100))
                print("epoch: %d pgd_acc: %.2f %%" % (i, test_acc_pgd * 100))
                print("epoch: %d autoattack_acc: %.2f %%\n" % (i, test_acc_aa * 100))

        total_num = len(test_loader.dataset)
        final_test_acc = test_num.item() / total_num
        success_num_pgd = test_num_pgd.item() / total_num
        success_num_aa = test_num_aa.item() / total_num
        print("clean_acc: %.2f %%" % (final_test_acc * 100))
        print("pgd_acc: %.2f %%" % (success_num_pgd * 100))
        print("autoattack_acc: %.2f %%\n" % (success_num_aa * 100))
        # (fix) these writes were f-strings combined with %-formatting, and the
        # first line was missing its trailing newline, corrupting the log file.
        f.write("clean_acc: %.2f %%\n" % (final_test_acc * 100))
        f.write("pgd_acc: %.2f %%\n" % (success_num_pgd * 100))
        f.write("autoattack_acc: %.2f %%\n" % (success_num_aa * 100))
| 51.50495 | 175 | 0.622261 |
79548f3e4beb1ddee7dad729fab71f7583f81b08 | 5,965 | py | Python | test/TrieTest.py | forslund/adapt | 4a8fe8ab7def813dc4f6f881ce646b92863d4bfc | [
"Apache-2.0"
] | null | null | null | test/TrieTest.py | forslund/adapt | 4a8fe8ab7def813dc4f6f881ce646b92863d4bfc | [
"Apache-2.0"
] | null | null | null | test/TrieTest.py | forslund/adapt | 4a8fe8ab7def813dc4f6f881ce646b92863d4bfc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from adapt.tools.text.trie import Trie
__author__ = 'seanfitz'
class TrieTest(unittest.TestCase):
def setUp(self):
pass
def test_basic_retrieval(self):
trie = Trie()
trie.insert("restaurant")
results = list(trie.lookup("restaurant"))
assert len(results) == 1
def test_data_is_correct_on_insert(self):
trie = Trie()
trie.insert("restaurant", "Concept")
results = list(trie.lookup("restaurant"))
assert len(results) == 1
assert len(results[0].get('data')) == 1
data = list(results[0].get('data'))
assert data[0] == 'Concept'
def test_gather(self):
trie = Trie()
trie.insert("rest")
trie.insert("restaurant")
results = list(trie.gather("restaurant"))
assert len(results) == 1
assert results[0].get('key') == "restaurant"
def test_retrieval_based_on_insertion_order(self):
trie = Trie()
trie.insert("rest")
trie.insert("restaurant")
results = list(trie.lookup("rest"))
assert len(results) == 1
results = list(trie.lookup("restaurant"))
assert len(results) == 1
def test_retrieval_of_multi_word_entity(self):
trie = Trie()
trie.insert("play", "PlayVerb")
trie.insert("the big bang theory", "Television Series")
results = list(trie.gather("1 of the big bang theory"))
assert len(results) == 0
def test_insert_single_character_entity(self):
trie = Trie()
trie.insert("1", "Number")
results = list(trie.gather("1 of the big bang theory"))
assert len(results) == 1
assert len(results[0].get('data')) == 1
def test_simple_remove(self):
trie = Trie()
trie.insert("1", "Number")
results = list(trie.lookup("1"))
assert len(results) == 1
assert len(results[0].get('data')) == 1
assert trie.remove("1")
results = list(trie.lookup("1"))
assert len(results) == 0
def test_named_remove(self):
trie = Trie()
trie.insert("1", "Number")
trie.insert("1", "The Loneliest")
results = list(trie.lookup("1"))
assert len(results) == 1
assert len(results[0].get('data')) == 2
assert trie.remove("1", "Number")
results = list(trie.lookup("1"))
assert len(results) == 1
assert len(results[0].get('data')) == 1
def test_edit_distance(self):
trie = Trie(max_edit_distance=1)
trie.insert("restaurant")
results = list(trie.lookup("restauran"))
assert len(results) == 1
results = list(trie.lookup("estaurant"))
assert len(results) == 1
results = list(trie.lookup("estauran"))
assert len(results) == 0
def test_edit_distance_confidence(self):
trie = Trie(max_edit_distance=2)
trie.insert("a")
trie.insert("bb")
trie.insert("ccc")
trie.insert("dddd")
trie.insert("100")
results = list(trie.gather("b"))
assert len(results) == 1
assert results[0].get('confidence') == 0.5
results = list(trie.gather("1 of"))
assert len(results) == 3
def test_edit_distance_no_confidence(self):
trie = Trie(max_edit_distance=2)
trie.insert("1", "Number")
results = list(trie.gather("of the big bang theory"))
assert len(results) == 0
def test_remove(self):
trie = Trie(max_edit_distance=2)
trie.insert("1", "Number")
trie.insert("2", "Number")
trie.remove("2")
one_lookup = list(trie.gather("1"))
two_lookup = list(trie.gather("2"))
assert len(one_lookup) == 1 # One match found
assert len(two_lookup) == 0 # Zero matches since removed
def test_remove_multi_last(self):
trie = Trie(max_edit_distance=2)
trie.insert("Kermit", "Muppets")
trie.insert("Kermit", "Frogs")
kermit_lookup = list(trie.lookup("Kermit"))[0]
assert 'Frogs' in kermit_lookup['data']
assert 'Muppets' in kermit_lookup['data']
trie.remove("Kermit", "Frogs")
kermit_lookup = list(trie.gather("Kermit"))[0]
assert kermit_lookup['data'] == {"Muppets"} # Right data remains
def test_remove_multi_first(self):
trie = Trie(max_edit_distance=2)
trie.insert("Kermit", "Muppets")
trie.insert("Kermit", "Frogs")
kermit_lookup = list(trie.lookup("Kermit"))[0]
assert 'Frogs' in kermit_lookup['data']
assert 'Muppets' in kermit_lookup['data']
trie.remove("Kermit", "Muppets")
kermit_lookup = list(trie.lookup("Kermit"))[0]
assert kermit_lookup['data'] == {"Frogs"} # Right data remains
def test_scan(self):
trie = Trie(max_edit_distance=2)
trie.insert("Kermit", "Muppets")
trie.insert("Gonzo", "Muppets")
trie.insert("Rowlf", "Muppets")
trie.insert("Gobo", "Fraggles")
def match_func(data):
return data == "Muppets"
results = trie.scan(match_func)
assert len(results) == 3
muppet_names = [r[0] for r in results]
assert "Kermit" in muppet_names
assert "Gonzo" in muppet_names
assert "Rowlf" in muppet_names
def tearDown(self):
pass
| 32.418478 | 74 | 0.601509 |
79548fd5ced38d8c30b703a40ee0f42cdd49d629 | 11,528 | py | Python | model/models.py | mongmogroup/auto-trading-framework | b68123e3820083d53b4c7637dc4547adc1eae228 | [
"MIT"
] | null | null | null | model/models.py | mongmogroup/auto-trading-framework | b68123e3820083d53b4c7637dc4547adc1eae228 | [
"MIT"
] | null | null | null | model/models.py | mongmogroup/auto-trading-framework | b68123e3820083d53b4c7637dc4547adc1eae228 | [
"MIT"
] | null | null | null | # common library
# common library
import time

import numpy as np

# RL models from stable-baselines
# from stable_baselines import PPO2
# from stable_baselines import A2C
# from stable_baselines import DDPG
# from stable_baselines import TD3
# from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise
# from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines3 import A2C
from stable_baselines3 import DDPG
from stable_baselines3 import PPO
from stable_baselines3 import TD3
from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv

from config import config
from env.EnvMultipleStock_trade import StockEnvTrade
from env.EnvMultipleStock_train import StockEnvTrain
from env.EnvMultipleStock_validation import StockEnvValidation
from preprocessing.preprocessors import *
def train_A2C(env_train, model_name, timesteps=50000):
    """Train an A2C agent on *env_train*, save it under the configured
    model directory, and return the trained agent."""
    t_begin = time.time()
    agent = A2C('MlpPolicy', env_train, verbose=0)
    agent.learn(total_timesteps=timesteps)
    t_end = time.time()
    agent.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
    print('Training time (A2C): ', (t_end - t_begin) / 60, ' minutes')
    return agent
def train_TD3(env_train, model_name, timesteps=50000):
    """Train a TD3 agent on *env_train* and save it to config.TRAINED_MODEL_DIR.

    (fix) ``TD3`` was referenced here while its import was commented out at
    the top of the file, so calling this function raised ``NameError``; the
    import from stable_baselines3 has been restored.

    Args:
        env_train: (vectorized) gym environment to train on.
        model_name: file name (without extension) for the saved model.
        timesteps: number of environment steps to train for.

    Returns:
        The trained stable-baselines3 TD3 model.
    """
    start = time.time()
    model = TD3('MlpPolicy', env_train)
    model.learn(total_timesteps=timesteps, log_interval=10)
    end = time.time()
    model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
    print('Training time (TD3): ', (end - start) / 60, ' minutes')
    return model
def train_DDPG(env_train, model_name, timesteps=10000):
    """Train a DDPG agent with Ornstein-Uhlenbeck action noise and save it.

    Args:
        env_train: (vectorized) gym environment to train on.
        model_name: file name (without extension) for the saved model.
        timesteps: number of environment steps to train for.

    Returns:
        The trained stable-baselines3 DDPG model.
    """
    # Temporally correlated OU noise on each action dimension drives
    # exploration in the continuous action space.
    n_actions = env_train.action_space.shape[-1]
    # action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
    action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))

    start = time.time()
    # (fix) removed the unused `param_noise = None` local left over from the
    # stable-baselines v2 API; stable-baselines3 DDPG does not take it.
    model = DDPG('MlpPolicy', env_train, action_noise=action_noise)
    model.learn(total_timesteps=timesteps)
    end = time.time()

    model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
    print('Training time (DDPG): ', (end - start) / 60, ' minutes')
    return model
def train_PPO(env_train, model_name, timesteps=50000):
    """Train a PPO agent on *env_train*, persist it, and return it."""
    t_begin = time.time()
    agent = PPO('MlpPolicy', env_train)
    agent.learn(total_timesteps=timesteps)
    t_end = time.time()
    agent.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
    print('Training time (PPO): ', (t_end - t_begin) / 60, ' minutes')
    return agent
def DRL_prediction(df,
                   model,
                   name,
                   last_state,
                   iter_num,
                   unique_trade_date,
                   rebalance_window,
                   turbulence_threshold,
                   initial):
    """Trade one rebalance window with *model* and persist the terminal state.

    Args:
        df: full preprocessed market DataFrame.
        model: trained agent exposing ``predict(obs)``.
        name: tag used in the saved last-state CSV file name.
        last_state: terminal portfolio state of the previous window
            (empty list on the very first window).
        iter_num: index into *unique_trade_date* marking the window end.
        unique_trade_date: ordered sequence of trade dates.
        rebalance_window: number of dates in one trading window.
        turbulence_threshold: risk cut-off forwarded to the trade env.
        initial: True only on the first window (no previous state to load).

    Returns:
        The state rendered by the trade environment near the end of the
        window; it seeds the next window's environment.
    """
    ### make a prediction based on trained model###

    ## trading env
    trade_data = data_split(df, start=unique_trade_date[iter_num - rebalance_window], end=unique_trade_date[iter_num])
    env_trade = DummyVecEnv([lambda: StockEnvTrade(trade_data,
                                                   turbulence_threshold=turbulence_threshold,
                                                   initial=initial,
                                                   previous_state=last_state,
                                                   model_name=name,
                                                   iteration=iter_num)])
    obs_trade = env_trade.reset()

    for i in range(len(trade_data.index.unique())):
        action, _states = model.predict(obs_trade)
        obs_trade, rewards, dones, info = env_trade.step(action)
        if i == (len(trade_data.index.unique()) - 2):
            # print(env_test.render())
            # NOTE(review): the state is captured at the second-to-last step --
            # presumably because the env terminates/resets on the final step;
            # confirm against StockEnvTrade before changing.
            last_state = env_trade.render()

    # Persist the carried-over state for inspection/debugging. `i` here is the
    # final value of the loop variable above.
    df_last_state = pd.DataFrame({'last_state': last_state})
    df_last_state.to_csv('results/last_state_{}_{}.csv'.format(name, i), index=False)
    return last_state
def DRL_validation(model, test_data, test_env, test_obs) -> None:
    """Roll the trained *model* through the whole validation window,
    stepping *test_env* once per unique date in *test_data*."""
    n_steps = len(test_data.index.unique())
    for _ in range(n_steps):
        action, _states = model.predict(test_obs)
        test_obs, rewards, dones, info = test_env.step(action)
def get_validation_sharpe(iteration):
    """Compute the Sharpe ratio from a validation account-value CSV.

    Reads ``results/account_value_validation_{iteration}.csv`` (index in the
    first column, account values in the second), derives per-step returns and
    returns ``sqrt(4) * mean(return) / std(return)``.

    Args:
        iteration: identifier embedded in the CSV file name.

    Returns:
        The (quarterly-annualized) Sharpe ratio as a float.
    """
    ###Calculate Sharpe ratio based on validation results###
    df_total_value = pd.read_csv('results/account_value_validation_{}.csv'.format(iteration), index_col=0)
    df_total_value.columns = ['account_value_train']
    # (fix) compute pct_change on the Series explicitly; the original assigned
    # a whole-DataFrame pct_change result to a single column, which relies on
    # fragile single-column DataFrame-to-column assignment semantics.
    df_total_value['daily_return'] = df_total_value['account_value_train'].pct_change(1)
    sharpe = (4 ** 0.5) * df_total_value['daily_return'].mean() / \
             df_total_value['daily_return'].std()
    return sharpe
def run_ensemble_strategy(df, unique_trade_date, rebalance_window, validation_window) -> None:
    """Ensemble Strategy that combines PPO, A2C and DDPG.

    For each rebalance window: train all three agents on the data seen so
    far, score each on the validation window by Sharpe ratio, then trade the
    next window with the best-scoring agent. The terminal portfolio state of
    each window seeds the next one.
    """
    print("============Start Ensemble Strategy============")
    # for ensemble model, it's necessary to feed the last state
    # of the previous model to the current model as the initial state
    last_state_ensemble = []

    ppo_sharpe_list = []
    ddpg_sharpe_list = []
    a2c_sharpe_list = []

    model_use = []

    # based on the analysis of the in-sample data
    # turbulence_threshold = 140
    insample_turbulence = df[(df.datadate < 20151000) & (df.datadate >= 20090000)]
    insample_turbulence = insample_turbulence.drop_duplicates(subset=['datadate'])
    insample_turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, .90)

    start = time.time()
    for i in range(rebalance_window + validation_window, len(unique_trade_date), rebalance_window):
        print("============================================")
        ## initial state is empty
        if i - rebalance_window - validation_window == 0:
            # inital state
            initial = True
        else:
            # previous state
            initial = False

        # Tuning trubulence index based on historical data
        # Turbulence lookback window is one quarter (63 trading days)
        historical_turbulence = df[(df.datadate < unique_trade_date[i - rebalance_window - validation_window]) & (
                df.datadate >= (unique_trade_date[i - rebalance_window - validation_window - 63]))]
        historical_turbulence = historical_turbulence.drop_duplicates(subset=['datadate'])
        historical_turbulence_mean = np.mean(historical_turbulence.turbulence.values)

        if historical_turbulence_mean > insample_turbulence_threshold:
            # if the mean of the historical data is greater than the 90% quantile of insample turbulence data
            # then we assume that the current market is volatile,
            # therefore we set the 90% quantile of insample turbulence data as the turbulence threshold
            # meaning the current turbulence can't exceed the 90% quantile of insample turbulence data
            turbulence_threshold = insample_turbulence_threshold
        else:
            # if the mean of the historical data is less than the 90% quantile of insample turbulence data
            # then we tune up the turbulence_threshold, meaning we lower the risk
            turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 0.99)
        print("turbulence_threshold: ", turbulence_threshold)

        ############## Environment Setup starts ##############
        ## training env
        train = data_split(df, start=20090000, end=unique_trade_date[i - rebalance_window - validation_window])
        env_train = DummyVecEnv([lambda: StockEnvTrain(train)])

        ## validation env
        validation = data_split(df, start=unique_trade_date[i - rebalance_window - validation_window],
                                end=unique_trade_date[i - rebalance_window])
        env_val = DummyVecEnv([lambda: StockEnvValidation(validation,
                                                          turbulence_threshold=turbulence_threshold,
                                                          iteration=i)])
        obs_val = env_val.reset()
        ############## Environment Setup ends ##############

        ############## Training and Validation starts ##############
        print("======Model training from: ", 20090000, "to ",
              unique_trade_date[i - rebalance_window - validation_window])
        print("======A2C Training========")
        model_a2c = train_A2C(env_train, model_name="A2C_10k_dow_{}".format(i), timesteps=10000)
        print("======A2C Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
              unique_trade_date[i - rebalance_window])
        DRL_validation(model=model_a2c, test_data=validation, test_env=env_val, test_obs=obs_val)
        # NOTE(review): each DRL_validation run is assumed to (re)write
        # results/account_value_validation_{i}.csv, which get_validation_sharpe
        # then reads -- confirm against StockEnvValidation.
        sharpe_a2c = get_validation_sharpe(i)
        print("A2C Sharpe Ratio: ", sharpe_a2c)

        print("======PPO Training========")
        model_ppo = train_PPO(env_train, model_name="PPO_100k_dow_{}".format(i), timesteps=50000)
        print("======PPO Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
              unique_trade_date[i - rebalance_window])
        DRL_validation(model=model_ppo, test_data=validation, test_env=env_val, test_obs=obs_val)
        sharpe_ppo = get_validation_sharpe(i)
        print("PPO Sharpe Ratio: ", sharpe_ppo)

        print("======DDPG Training========")
        model_ddpg = train_DDPG(env_train, model_name="DDPG_10k_dow_{}".format(i), timesteps=20000)
        print("======DDPG Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
              unique_trade_date[i - rebalance_window])
        DRL_validation(model=model_ddpg, test_data=validation, test_env=env_val, test_obs=obs_val)
        sharpe_ddpg = get_validation_sharpe(i)
        print("DDPG Sharpe Ratio: ", sharpe_ddpg)

        ppo_sharpe_list.append(sharpe_ppo)
        a2c_sharpe_list.append(sharpe_a2c)
        ddpg_sharpe_list.append(sharpe_ddpg)

        # Model Selection based on sharpe ratio
        if (sharpe_ppo >= sharpe_a2c) & (sharpe_ppo >= sharpe_ddpg):
            model_ensemble = model_ppo
            model_use.append('PPO')
        elif (sharpe_a2c > sharpe_ppo) & (sharpe_a2c > sharpe_ddpg):
            model_ensemble = model_a2c
            model_use.append('A2C')
        else:
            model_ensemble = model_ddpg
            model_use.append('DDPG')
        ############## Training and Validation ends ##############

        ############## Trading starts ##############
        print("======Trading from: ", unique_trade_date[i - rebalance_window], "to ", unique_trade_date[i])
        print("Used Model: ", model_ensemble)
        last_state_ensemble = DRL_prediction(df=df, model=model_ensemble, name="ensemble",
                                             last_state=last_state_ensemble, iter_num=i,
                                             unique_trade_date=unique_trade_date,
                                             rebalance_window=rebalance_window,
                                             turbulence_threshold=turbulence_threshold,
                                             initial=initial)
        print("============Trading Done============")
        ############## Trading ends ##############

    end = time.time()
    print("Ensemble Strategy took: ", (end - start) / 60, " minutes")
795490d2415f93990c8e0a510de7d53427822c5a | 9,497 | py | Python | games/twenty.py | JmanThunder/EpicBot | 502e926cf7bcac61bc2cbbe4a1f58e9a58a40c7e | [
"BSD-3-Clause"
] | 571 | 2021-02-08T06:59:15.000Z | 2022-03-28T08:08:24.000Z | games/twenty.py | JmanThunder/EpicBot | 502e926cf7bcac61bc2cbbe4a1f58e9a58a40c7e | [
"BSD-3-Clause"
] | 61 | 2021-03-02T22:10:30.000Z | 2022-03-26T14:45:42.000Z | games/twenty.py | JmanThunder/EpicBot | 502e926cf7bcac61bc2cbbe4a1f58e9a58a40c7e | [
"BSD-3-Clause"
] | 97 | 2021-02-16T16:26:08.000Z | 2022-03-22T17:22:35.000Z | import asyncio
import random
from copy import deepcopy as dc
import discord
async def play(ctx, bot):
    """Starts a 2048 game inside of Discord.

    Renders the board in an embed and drives the game entirely through
    reaction "buttons" (left/right/up/down arrows plus an X to quit).

    Args:
        ctx: command invocation context; only the invoking author may play.
        bot: the Discord client, used to wait for reaction events.
    """
    # 4x4 grid; "_" marks an empty cell. The game starts with one tile of 2.
    board = [
        ["_", "_", "_", "_"],
        ["_", "_", "_", "_"],
        ["_", "_", "_", "_"],
        ["_", "_", "_", 2],
    ]
    score = 0
    total = 0

    await ctx.send(
        "Starting game...\nIf a reaction is not received every 5 minutes, the game will time out."
    )
    embed = discord.Embed(title=f"Score: **{score}**", description=f"```py\n{print_board(board)}\n```")
    message = await ctx.send(embed=embed)
    # Reaction controls: left, right, up, down, cancel.
    await message.add_reaction("\u2B05")
    await message.add_reaction("\u27A1")
    await message.add_reaction("\u2B06")
    await message.add_reaction("\u2B07")
    await message.add_reaction("\u274C")

    def check(reaction, user):
        # Accept only the game owner's reactions on this message, and only
        # the five control emoji.
        return (
            (user.id == ctx.author.id)
            and (str(reaction.emoji) in ["\u2B06", "\u2B07", "\u2B05", "\u27A1", "\u274C"])
            and (reaction.message.id == message.id)
        )

    while True:
        try:
            reaction, user = await bot.wait_for(
                "reaction_add", check=check, timeout=300.0
            )
        except asyncio.TimeoutError:
            # No input for 5 minutes: abandon the game and clean up.
            await ctx.send("Ending Game...")
            await message.delete()
            return
        else:
            try:
                # Remove the player's reaction so the same emoji can be
                # pressed again; requires "Manage Messages", so a Forbidden
                # error is tolerated.
                await message.remove_reaction(str(reaction.emoji), ctx.author)
            except discord.errors.Forbidden:
                pass
            if str(reaction.emoji) == "\u2B06":
                msg, nb, total = execute_move("up", board)
            elif str(reaction.emoji) == "\u2B07":
                msg, nb, total = execute_move("down", board)
            elif str(reaction.emoji) == "\u2B05":
                msg, nb, total = execute_move("left", board)
            elif str(reaction.emoji) == "\u27A1":
                msg, nb, total = execute_move("right", board)
            elif str(reaction.emoji) == "\u274C":
                await ctx.send("Ending game")
                await message.delete()
                return
            # `total` is the sum of tile values merged by this move.
            score += total
            if msg == "Lost":
                await ctx.send(
                    f"Oh no! It appears you have lost {ctx.author.mention}. You finished with a score of {score}!"
                )
                await message.delete()
                return
            board = nb
            embed = discord.Embed(title=f"Score: **{score}**", description=f"```py\n{print_board(board)}\n```")
            await message.edit(embed=embed)
def print_board(board):
    """Render the board as monospaced text, one row per line.

    Every cell is left-justified to the width of the widest cell plus two
    spaces of padding, so columns line up in Discord's code block.
    """
    width = 2 + max(len(str(cell)) for row in board for cell in row)
    lines = ["".join(str(cell).ljust(width) for cell in row) for row in board]
    return "\n".join(lines) + "\n"
def execute_move(move, pboard):
    """Apply one 2048 move ("left"/"right"/"up"/"down") to a copy of *pboard*.

    Returns a ``(message, new_board, merged_total)`` triple where *message*
    is "Lost" when no tile could be spawned after the move, and ""
    otherwise. A new tile is only added when the move actually changed the
    board. The caller's board is never mutated.

    NOTE(review): an unrecognized *move* string would leave ``nb`` unbound
    and raise NameError at the final comparison; callers only pass the four
    valid directions.
    """
    board = dc(pboard)  # deep copy: never mutate the caller's board
    total = 0
    if move.lower() == "left":
        # Merge first, then compact each row toward column 0 by repeatedly
        # shifting cells across the leftmost gaps.
        nb, total = check_left(board)
        for x in range(len(nb)):
            while nb[x][0] == "_" and (nb[x][1] != "_" or nb[x][2] != "_" or nb[x][3] != "_"):
                nb[x][0] = nb[x][1]
                nb[x][1] = nb[x][2]
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
            while nb[x][1] == "_" and (nb[x][2] != "_" or nb[x][3] != "_"):
                nb[x][1] = nb[x][2]
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
            while nb[x][2] == "_" and (nb[x][3] != "_"):
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
    if move.lower() == "right":
        # Mirror of "left": merge, then compact toward column 3.
        nb, total = check_right(board)
        for x in range(len(nb)):
            while nb[x][3] == "_" and (nb[x][2] != "_" or nb[x][1] != "_" or nb[x][0] != "_"):
                nb[x][3] = nb[x][2]
                nb[x][2] = nb[x][1]
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
            while nb[x][2] == "_" and (nb[x][1] != "_" or nb[x][0] != "_"):
                nb[x][2] = nb[x][1]
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
            while nb[x][1] == "_" and (nb[x][0] != "_"):
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
    if move.lower() == "down":
        # Rotate columns into rows, reuse the "left"-style compaction, then
        # rotate back.
        nb = columize(board)
        nb, total = check_down(nb)
        for x in range(len(nb)):
            while nb[x][0] == "_" and (nb[x][1] != "_" or nb[x][2] != "_" or nb[x][3] != "_"):
                nb[x][0] = nb[x][1]
                nb[x][1] = nb[x][2]
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
            while nb[x][1] == "_" and (nb[x][2] != "_" or nb[x][3] != "_"):
                nb[x][1] = nb[x][2]
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
            while nb[x][2] == "_" and (nb[x][3] != "_"):
                nb[x][2] = nb[x][3]
                nb[x][3] = "_"
        nb = rowize(nb)
    if move.lower() == "up":
        # Rotate columns into rows, reuse the "right"-style compaction, then
        # rotate back.
        nb = columize(board)
        nb, total = check_up(nb)
        for x in range(len(nb)):
            while nb[x][3] == "_" and (nb[x][2] != "_" or nb[x][1] != "_" or nb[x][0] != "_"):
                nb[x][3] = nb[x][2]
                nb[x][2] = nb[x][1]
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
            while nb[x][2] == "_" and (nb[x][1] != "_" or nb[x][0] != "_"):
                nb[x][2] = nb[x][1]
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
            while nb[x][1] == "_" and (nb[x][0] != "_"):
                nb[x][1] = nb[x][0]
                nb[x][0] = "_"
        nb = rowize(nb)
    if (
        nb != pboard
    ):  # So the user doesn't make a move that doesn't change anything, and just add a number
        some_message, nb = add_number(nb)
    else:
        some_message = ""
    if some_message.startswith("Lost"):
        return "Lost", nb, total
    else:
        return "", nb, total
def add_number(board):
    """Spawn a new tile (2 with ~85% probability, else 4) in a random empty cell.

    Mutates *board* in place and returns ``(message, board)`` where *message*
    is "Lost" when the board has no empty cell left, and "" otherwise.

    (fix) the original detected a full board by recursing until Python raised
    ``RecursionError`` -- fragile and slow. This version checks explicitly for
    an empty cell up front, while keeping the same rejection-sampling scheme
    (uniform over rows containing a gap, then uniform over that row's gaps)
    so the spawn distribution is unchanged.

    Args:
        board: 4x4 list-of-lists where "_" marks an empty cell.

    Returns:
        ("Lost", board) when the board is full, otherwise ("", board) with
        one previously empty cell replaced by 2 or 4.
    """
    if not any("_" in row for row in board):
        return "Lost", board
    # Rejection-sample a row that still has a gap.
    row = random.randint(0, 3)
    while "_" not in board[row]:
        row = random.randint(0, 3)
    if board[row].count("_") == 1:
        column = board[row].index("_")
    else:
        # Rejection-sample a column pointing at one of the row's gaps.
        column = random.randint(0, 3)
        while board[row][column] != "_":
            column = random.randint(0, 3)
    # randint(0, 100) is inclusive, so 2 is spawned with probability 85/101.
    joining = 2 if random.randint(0, 100) < 85 else 4
    board[row][column] = joining
    return "", board
def columize(board):
    """Rotate the 4x4 board so that each column, read bottom-to-top, becomes
    a row of the result (used to reuse the row-wise move logic for vertical
    moves). Returns a new board; the input is not modified."""
    return [[board[3 - r][c] for r in range(4)] for c in range(4)]
def rowize(board):
    """Inverse of ``columize``: rotate a column-major board back so the
    original rows are restored. Returns a new board; the input is not
    modified."""
    return [[board[c][3 - r] for c in range(4)] for r in range(4)]
def check_left(board):
    """Merge pass for a left move: each cell absorbs the nearest equal,
    non-empty tile to its right, skipping over "_" gaps.

    Mutates *board* in place and returns ``(board, total)`` where *total* is
    the sum of the merged tile values (the score gained).

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the ``elif`` arms are read as alternatives to ``board[x][y + 1] != "_"``
    (i.e. merge only with the *nearest* non-empty neighbour) -- confirm
    against the upstream repository.
    """
    total = 0
    for x in range(len(board)):
        for y in range(len(board[x])):
            try:
                if board[x][y + 1] != "_":
                    if board[x][y] == board[x][y + 1]:
                        board[x][y] = board[x][y] + board[x][y + 1]
                        total += board[x][y]
                        board[x][y + 1] = "_"
                elif board[x][y + 2] != "_":
                    if board[x][y] == board[x][y + 2]:
                        board[x][y] = board[x][y] + board[x][y + 2]
                        total += board[x][y]
                        board[x][y + 2] = "_"
                elif board[x][y + 3] != "_":
                    if board[x][y] == board[x][y + 3]:
                        board[x][y] = board[x][y] + board[x][y + 3]
                        total += board[x][y]
                        board[x][y + 3] = "_"
            except IndexError:
                # Ran past the right edge of the row: nothing to merge.
                pass
    return board, total
def check_right(board):
    """Merge equal tiles rightwards, in place.

    Each row is reversed, merged with the same left-to-right rule used
    by check_left(), then reversed back, so merges favour the right-hand
    side. Returns the (mutated) board and the sum of all merged values.
    """
    total = 0
    for line in board:
        line.reverse()
        for start in range(len(line)):
            # Look at the next three cells, stopping at the first
            # non-blank one (or at the end of the row).
            for step in range(1, 4):
                if start + step >= len(line):
                    break
                partner = line[start + step]
                if partner == "_":
                    continue
                if line[start] == partner:
                    line[start] = line[start] + partner
                    total += line[start]
                    line[start + step] = "_"
                break
        line.reverse()
    return board, total
def check_up(board):
    """Merge equal tiles "upwards", in place.

    Identical mechanics to check_right(): every row is reversed, merged,
    and reversed back.  Presumably the caller passes a columized board so
    rows stand in for columns -- TODO confirm against the caller.

    Returns the (mutated) board and the sum of all merged values.
    """
    total = 0
    for line in board:
        line.reverse()
        pos = 0
        while pos < len(line):
            offset = 1
            # Skip blanks; act on the first non-blank cell within 3 steps.
            while offset <= 3 and pos + offset < len(line):
                cell = line[pos + offset]
                if cell == "_":
                    offset += 1
                    continue
                if line[pos] == cell:
                    line[pos] = line[pos] + cell
                    total += line[pos]
                    line[pos + offset] = "_"
                break
            pos += 1
        line.reverse()
    return board, total
def check_down(board):
    """Merge equal tiles "downwards", in place.

    Identical mechanics to check_left(): cells are merged towards lower
    indices within each row.  Presumably the caller passes a columized
    board so rows stand in for columns -- TODO confirm against the caller.

    Returns the (mutated) board and the sum of all merged values.
    """
    total = 0
    for line in board:
        length = len(line)
        for idx in range(length):
            # Inspect at most three cells ahead, stopping at the first
            # non-blank one.
            for ahead in range(idx + 1, min(idx + 4, length)):
                other = line[ahead]
                if other == "_":
                    continue
                if line[idx] == other:
                    line[idx] = line[idx] + other
                    total += line[idx]
                    line[ahead] = "_"
                break
    return board, total
| 28.778788 | 102 | 0.542592 |
795492a8ed08bc3552d0d7c1753604d5c2471d56 | 1,653 | py | Python | loss_folder/draw_loss.py | manhcntt21/TextNormSeq2Seq | 440b252bddc0c735f083acd51271f2056d088a0a | [
"MIT"
] | null | null | null | loss_folder/draw_loss.py | manhcntt21/TextNormSeq2Seq | 440b252bddc0c735f083acd51271f2056d088a0a | [
"MIT"
] | null | null | null | loss_folder/draw_loss.py | manhcntt21/TextNormSeq2Seq | 440b252bddc0c735f083acd51271f2056d088a0a | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def get_data(f1, f2, f3, f4):
    """Load four tab-separated loss files.

    Each file contains lines of the form ``<epoch>TAB<loss>``.  Every
    loss value is scaled by 100.  Epoch labels are collected from the
    first file only (the four files are assumed to cover the same
    epochs -- TODO confirm).

    The original repeated the same read loop four times; it is factored
    into a single helper here.

    Returns ``(epoches, train_word, test_word, train_char, test_char)``.
    """
    def _read(path, epochs=None):
        # Parse one loss file; optionally collect the epoch column.
        values = []
        with open(path, "r") as fh:
            for line in fh:
                fields = line.rstrip("\n\r").split("\t")
                values.append(float(fields[1]) * 100)
                if epochs is not None:
                    epochs.append(fields[0])
        return values

    epoches = []
    train_word = _read(f1, epoches)
    test_word = _read(f2)
    train_char = _read(f3)
    test_char = _read(f4)
    return epoches, train_word, test_word, train_char, test_char
def reduce_array(array):
    """Downsample *array*: keep the first element, then roughly every 10th.

    BUG FIX: the original tested ``i % 9 == 9``, which is never true (a
    value mod 9 is always 0..8), so only the first element was ever kept.
    The check is assumed to have meant every 10th index -- TODO confirm
    the intended stride.

    The sampled list is printed (as before) and, additionally, returned
    (the original returned None); an empty input now yields [] instead of
    raising IndexError.
    """
    if not array:
        return []
    sampled = [array[0]]
    for index, value in enumerate(array):
        if index % 10 == 9:
            sampled.append(value)
    print(sampled)
    return sampled
def draw(epoches, train_word, test_word, train_char, test_char):
    """Plot the four loss curves on one figure, save it, then display it.

    BUG FIX: the original called ``plt.show()`` before ``plt.xticks()``
    and ``plt.savefig()``.  With interactive backends the figure is torn
    down when show() returns, so the tick setting was lost and
    'loss_13_1.png' was saved blank.  Configure and save first, show last.
    """
    plt.plot(epoches, train_word, label='train_word')
    plt.plot(epoches, test_word, label='test_word')
    plt.plot(epoches, train_char, label='train_char')
    plt.plot(epoches, test_char, label='test_char')
    plt.legend(loc='best')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    # Label every 10th epoch to keep the axis readable.
    plt.xticks(np.arange(1, len(train_word), step=10))
    plt.savefig('loss_13_1.png')
    plt.show()
if __name__ == "__main__":
    # Load the four loss curves and render them on a single figure.
    # (The lists were previously pre-initialised to [] and immediately
    # overwritten by get_data(); those dead assignments are removed.)
    epoches, train_word, test_word, train_char, test_char = get_data(
        "./word/train.txt", "./word/test.txt",
        "./spelling/train.txt", "./spelling/test.txt")
    draw(epoches, train_word, test_word, train_char, test_char)
795492b8c20ddb3593379dcc68b29e9350d790c5 | 3,085 | py | Python | ptth/session.py | wagamama/python-ptth | 589f1d2bff178cc34ad2e71328c4a4d65e690df6 | [
"Apache-2.0"
] | null | null | null | ptth/session.py | wagamama/python-ptth | 589f1d2bff178cc34ad2e71328c4a4d65e690df6 | [
"Apache-2.0"
] | 5 | 2015-09-10T15:51:01.000Z | 2015-12-11T13:55:37.000Z | ptth/session.py | wagamama/python-ptth | 589f1d2bff178cc34ad2e71328c4a4d65e690df6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import threading
import socket
from select import select
from urlparse import urlparse
from .header import Headers
from .request import Request
from .response import Response
from .exception import *
class Handler(object):
    """Callback interface invoked by Session; subclass and override.

    BUG FIX: the original used Python-2-only ``print 'x'`` statements,
    which are a SyntaxError on Python 3.  The parenthesized single-
    argument form below prints identically on both Python 2 and 3.
    """
    def ready_to_handle(self):
        """Called once the PTTH upgrade handshake has succeeded."""
        print('ready to handle')
    def handle_request(self, request):
        """Called with each parsed reverse request; may return a response."""
        print('handle request')
    def handle_error(self, error):
        """Called when the session encounters an error."""
        print('handle error')
    def handle_close(self):
        """Called when the session's socket loop terminates."""
        print('handle close')
class Session(threading.Thread):
    """Client side of a PTTH (reverse HTTP) connection.

    The thread connects to an HTTP server, sends an Upgrade request for
    ``PTTH/1.0`` and, once upgraded, parses incoming data as HTTP
    requests which are dispatched to the supplied handler.
    """
    def __init__(self, handler):
        super(Session, self).__init__()
        self._handler = handler            # Handler receiving callbacks
        self._upgraded = False             # True once a 101 response arrived
        self._stop_event = threading.Event()
        self._socket = None
    def _init_url(self, url):
        """Split *url* into host/port/path; only http:// is accepted."""
        result = urlparse(url)
        if result.scheme == 'http':
            self._host = result.hostname
            self._port = result.port if result.port else 80
            self._url = result.path
        else:
            raise PtthUrlError()
    def _ptth_headers(self):
        """Headers asking the server to switch the connection to PTTH."""
        return Headers({
            'Connection': 'Upgrade',
            'Upgrade': 'PTTH/1.0'})
    def _init_ptth_request(self, url, headers):
        """Build the upgrade POST request, merging any caller headers."""
        self._init_url(url)
        ptth_headers = self._ptth_headers()
        ptth_headers.add(headers)
        self._ptth_request = Request('POST', self._url, ptth_headers)
    def _check_ptth_response(self, data):
        """Mark the session upgraded on a 101 response, else fail."""
        response = Response.load(data)
        if response.status == 101:
            self._upgraded = True
        else:
            raise PtthUpgradeFailed()
    def serve(self, url, headers=None):
        """Prepare the upgrade request and start the session thread."""
        self._init_ptth_request(url, headers)
        self.start()
    def run(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((self._host, self._port))
        self._socket.send(self._ptth_request.dump())
        while True:
            r_list, _, x_list = select([self._socket], [], [self._socket])
            if len(x_list) > 0:
                if not self._stop_event.is_set():
                    # BUG FIX: the original referenced the undefined name
                    # `self_handler` (NameError) and passed no argument,
                    # although handle_error() takes the error object.
                    self._handler.handle_error(PtthError('socket exception'))
            if self._stop_event.is_set():
                self._socket.close()
                break
            if len(r_list) > 0:
                data = self._socket.recv(4096)
                if not data:
                    # Peer closed the connection.
                    break
                elif not self._upgraded:
                    try:
                        self._check_ptth_response(data)
                    except PtthError as e:
                        self._handler.handle_error(e)
                    else:
                        self._handler.ready_to_handle()
                else:
                    request = Request.load(data)
                    response = self._handler.handle_request(request)
                    if response is not None:
                        self._socket.send(response.dump())
        self._handler.handle_close()
    def close(self):
        """Ask the session thread to stop and unblock its recv()."""
        self._stop_event.set()
        if self._socket is not None:
            self._socket.shutdown(socket.SHUT_RD)
| 29.380952 | 74 | 0.565964 |
795492bb02e72f603bc7f72599a04b76f2601018 | 13,051 | py | Python | userbot/modules/afk.py | Abucuyy/Uciha | 726e9cd61eabf056064e40f7b322d8993161e52a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/afk.py | Abucuyy/Uciha | 726e9cd61eabf056064e40f7b322d8993161e52a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/afk.py | Abucuyy/Uciha | 726e9cd61eabf056064e40f7b322d8993161e52a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2020-09-05T12:45:31.000Z | 2020-09-25T09:04:29.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
# All Credits to https://t.me/azrim89 for timestamp.
# All Credits to https://t.me/Devp73 for Offline stamps..
#
""" Userbot module which contains afk-related commands """
from datetime import datetime
import time
from random import choice, randint
from asyncio import sleep
from telethon.events import StopPropagation
from telethon.tl.functions.account import UpdateProfileRequest
from userbot import (AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG,
BOTLOG_CHATID, USERS, PM_AUTO_BAN, bot, ALIVE_NAME, is_redis_alive)
from userbot.events import register
# ========================= CONSTANTS ============================
# Canned away-messages; one is picked at random when .afk is activated.
AFKSTR = [
    "`I'm busy right now. Please talk in a bag and when I come back you can just give me the bag!`",
    "I'm away right now. If you need anything, leave a message after the beep:\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!",
    "`You missed me, next time aim better.`",
    "`I'll be back in a few minutes and if I'm not...,\nwait longer.`",
    "`I'm not here right now, so I'm probably somewhere else.`",
    "`Roses are red,\nViolets are blue,\nLeave me a message,\nAnd I'll get back to you.`",
    "`Sometimes the best things in life are worth waiting for…\nI'll be right back.`",
    "`I'll be right back,\nbut if I'm not right back,\nI'll be back later.`",
    "`If you haven't figured it out already,\nI'm not here.`",
    "`Hello, welcome to my away message, how may I ignore you today?`",
    "`I'm away over 7 seas and 7 countries,\n7 waters and 7 continents,\n7 mountains and 7 hills,\n7 plains and 7 mounds,\n7 pools and 7 lakes,\n7 springs and 7 meadows,\n7 cities and 7 neighborhoods,\n7 blocks and 7 houses...\n\nWhere not even your messages can reach me!`",
    "`I'm away from the keyboard at the moment, but if you'll scream loud enough at your screen, I might just hear you.`",
    "`I went that way\n---->`",
    "`I went this way\n<----`",
    "`Please leave a message and make me feel even more important than I already am.`",
    "`I am not here so stop writing to me,\nor else you will find yourself with a screen full of your own messages.`",
    "`If I were here,\nI'd tell you where I am.\n\nBut I'm not,\nso ask me when I return...`",
    "`I am away!\nI don't know when I'll be back!\nHopefully a few minutes from now!`",
    "`I'm not available right now so please leave your name, number, and address and I will stalk you later.`",
    "`Sorry, I'm not here right now.\nFeel free to talk to my userbot as long as you like.\nI'll get back to you later.`",
    "`I bet you were expecting an away message!`",
    "`Life is so short, there are so many things to do...\nI'm away doing one of them..`",
    "`I am not here right now...\nbut if I was...\n\nwouldn't that be awesome?`",
]
# ================= CONSTANT =================
# BUG FIX: `uname` was used below without ever being imported, so a falsy
# ALIVE_NAME raised NameError instead of falling back to the host name.
from platform import uname
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
AFKSK = str(choice(AFKSTR))
# ============================================
# Module-level AFK state shared by the handlers below.  (The original
# module-level `global` statements were no-ops and have been removed.)
USER_AFK = {}
afk_time = None
afk_start = {}
@register(outgoing=True, pattern="^.afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
    """ For .afk command, allows you to inform people that you are afk when they message you """
    # (Removed: unused local `message` and an unused `global reason`.)
    string = afk_e.pattern_match.group(1)  # optional AFK reason
    global ISAFK
    global AFKREASON
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    user = await bot.get_me()
    # Reset per-AFK bookkeeping and record the start timestamp.
    USER_AFK = {}
    afk_time = None
    afk_end = {}
    start_1 = datetime.now()
    afk_start = start_1.replace(microsecond=0)
    if string:
        AFKREASON = string
        await afk_e.edit(f"**Going AFK!**\
\nReason: `{string}`")
    else:
        await afk_e.edit("**Going AFK!**")
    # Tag the Telegram profile's last name with an offline marker.
    if user.last_name:
        await afk_e.client(UpdateProfileRequest(first_name=user.first_name, last_name=user.last_name + " [ OFFLINE ]"))
    else:
        await afk_e.client(UpdateProfileRequest(first_name=user.first_name, last_name=" [ OFFLINE ]"))
    if BOTLOG:
        await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nYou went AFK!")
    ISAFK = True
    afk_time = datetime.now()
    # Stop other outgoing handlers from immediately clearing AFK.
    raise StopPropagation
@register(outgoing=True)
async def type_afk_is_not_true(notafk):
    """ This sets your status as not afk automatically when you write something while being afk """
    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    user = await bot.get_me()
    last = user.last_name
    if last and last.endswith(" [ OFFLINE ]"):
        # Strip the 12-character " [ OFFLINE ]" marker added by .afk.
        last1 = last[:-12]
    else:
        # BUG FIX: previously any last name without the marker was wiped
        # to ""; keep whatever last name the account already has.
        last1 = last if last else ""
    back_alive = datetime.now()
    afk_end = back_alive.replace(microsecond=0)
    if ISAFK:
        ISAFK = False
        msg = await notafk.respond("**My Master is back !**")
        # BUG FIX: time.sleep(3) blocked the whole event loop for three
        # seconds; use the asyncio `sleep` this module already imports.
        await sleep(3)
        await msg.delete()
        await notafk.client(UpdateProfileRequest(first_name=user.first_name, last_name=last1))
        if BOTLOG:
            # Summarise the missed traffic in the bot-log chat.
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "You've recieved " + str(COUNT_MSG) + " messages from " +
                str(len(USERS)) + " chats while you were away",
            )
            for i in USERS:
                name = await notafk.client.get_entity(i)
                name0 = str(name.first_name)
                await notafk.client.send_message(
                    BOTLOG_CHATID,
                    "[" + name0 + "](tg://user?id=" + str(i) + ")" +
                    " sent you " + "`" + str(USERS[i]) + " messages`",
                )
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """ This function takes care of notifying the people who mention you that you are AFK."""
    global COUNT_MSG
    global USERS
    global ISAFK
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    # BUG FIX: this module imports only the `datetime` class, so the
    # original `datetime.timedelta(...)` raised AttributeError.
    from datetime import timedelta
    user = await bot.get_me()
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**a while ago**"
    if mention.message.mentioned and not (await mention.get_sender()).bot:
        if ISAFK:
            now = datetime.now()
            datime_since_afk = now - afk_time
            # BUG FIX: `.seconds` ignores whole days (so the day branches
            # below were unreachable); total_seconds() counts everything.
            elapsed = datime_since_afk.total_seconds()
            days = elapsed // (24 * 3600)
            elapsed = elapsed % (24 * 3600)
            hours = elapsed // 3600
            elapsed %= 3600
            minutes = elapsed // 60
            elapsed %= 60
            seconds = elapsed
            if days == 1:
                afk_since = "**Yesterday**"
            elif days > 1:
                if days > 6:
                    # Over a week ago: show the full start date.
                    date = now - timedelta(days=days, hours=hours, minutes=minutes)
                    afk_since = date.strftime("%A, %Y %B %m, %H:%I")
                else:
                    wday = now - timedelta(days=days)
                    afk_since = wday.strftime('%A')
            elif hours > 1:
                afk_since = f"`{int(hours)}h {int(minutes)}m`"
            elif minutes > 0:
                afk_since = f"`{int(minutes)}m {int(seconds)}s`"
            else:
                afk_since = f"`{int(seconds)}s`"
            if mention.sender_id not in USERS:
                if AFKREASON:
                    await mention.reply(f"My Master **{DEFAULTUSER}** Is still **afk since** {afk_since}.\
\n**Because My Master is** `{AFKREASON}`")
                else:
                    await mention.reply(f"My Master 👑 {DEFAULTUSER} 👑 is **afk Since** {afk_since}.\nand My Master Has Left a Word for You Only: \n{AFKSK}\n`.` ")
                USERS.update({mention.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif mention.sender_id in USERS:
                # Repeat the notice only occasionally for known senders.
                if USERS[mention.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await mention.reply(f"My Master **{DEFAULTUSER}** Is still **afk since** {afk_since}.\
\n**Because My Master is** `{AFKREASON}`")
                    else:
                        await mention.reply(f"My Master 👑 {DEFAULTUSER} 👑 is **afk Since** {afk_since}.\nand My Master Has Left a Word for You Only: \n{AFKSK}\n`.` ")
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """ Function which informs people that you are AFK in PM """
    # (Removed: duplicated `global` declarations from the original.)
    global ISAFK
    global USERS
    global COUNT_MSG
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    # BUG FIX: this module imports only the `datetime` class, so the
    # original `datetime.timedelta(...)` raised AttributeError.
    from datetime import timedelta
    user = await bot.get_me()
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**a while ago**"
    # 777000 is Telegram's service-notification account; never auto-reply.
    if sender.is_private and sender.sender_id != 777000 and not (
            await sender.get_sender()).bot:
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            datime_since_afk = now - afk_time
            # BUG FIX: `.seconds` ignores whole days (so the day branches
            # below were unreachable); total_seconds() counts everything.
            elapsed = datime_since_afk.total_seconds()
            days = elapsed // (24 * 3600)
            elapsed = elapsed % (24 * 3600)
            hours = elapsed // 3600
            elapsed %= 3600
            minutes = elapsed // 60
            elapsed %= 60
            seconds = elapsed
            if days == 1:
                afk_since = "**yesterday**"
            elif days > 1:
                if days > 6:
                    # Over a week ago: show the full start date.
                    date = now - timedelta(days=days, hours=hours, minutes=minutes)
                    afk_since = date.strftime("%A, %Y %B %m, %H:%I")
                else:
                    wday = now - timedelta(days=days)
                    afk_since = wday.strftime('%A')
            elif hours > 1:
                afk_since = f"`{int(hours)}h {int(minutes)}m`"
            elif minutes > 0:
                afk_since = f"`{int(minutes)}m {int(seconds)}s`"
            else:
                afk_since = f"`{int(seconds)}s`"
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply(f"My Master **{DEFAULTUSER}** is **afk since** {afk_since}.\
\n**Because My Master is** `{AFKREASON}`")
                else:
                    await sender.reply(f"My Master 👑 {DEFAULTUSER} 👑 is **afk Since** {afk_since}.\nand My Master Has Left a Word for You Only: \n{AFKSK}\n`.` ")
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
        elif apprv and sender.sender_id in USERS:
            # NOTE(review): this elif pairs with `if apprv and ISAFK`
            # above, so it also runs when ISAFK is False for already-
            # tracked senders; structure preserved as-is — confirm intent.
            if USERS[sender.sender_id] % randint(2, 4) == 0:
                if AFKREASON:
                    await sender.reply(f"My Master **{DEFAULTUSER}** Is **still afk since** {afk_since}.\
\n**Because My Master is** `{AFKREASON}`")
                else:
                    await sender.reply(f"My Master 👑 {DEFAULTUSER} 👑 is **afk Since** {afk_since}.\nand My Master Has Left a Word for You Only: \n{AFKSK}\n`.` ")
                USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                COUNT_MSG = COUNT_MSG + 1
            else:
                USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                COUNT_MSG = COUNT_MSG + 1
# Help entry displayed by the userbot's .help command for this module.
CMD_HELP.update({
    "afk":
    "`.afk` [Optional Reason]\
\nUsage: Sets you as afk.\nReplies to anyone who tags/PM's \
you telling them that you are AFK(reason).\n\nSwitches off AFK when you type back anything, anywhere.\
"
})
| 44.695205 | 276 | 0.55988 |
795493286a8d67a0b84f9c4864511544863f318b | 128 | py | Python | cv/AlexNet/__init__.py | lnblanke/OCR | b235faa85fedd9f764f71ea592e8693a2a7ac42a | [
"MIT"
] | null | null | null | cv/AlexNet/__init__.py | lnblanke/OCR | b235faa85fedd9f764f71ea592e8693a2a7ac42a | [
"MIT"
] | null | null | null | cv/AlexNet/__init__.py | lnblanke/OCR | b235faa85fedd9f764f71ea592e8693a2a7ac42a | [
"MIT"
] | null | null | null | # @Time: 9/19/2021
# @Author: lnblanke
# @Email: fjh314.84@gmail.com
# @File: __init__.py.py
from AlexNet import createAlexNet
| 18.285714 | 33 | 0.71875 |
79549336a4241631b02e785750cc140cfc8710c5 | 15,643 | py | Python | crawler/crawler_2.py | marxlee/py-tools | 4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef | [
"Apache-2.0"
] | null | null | null | crawler/crawler_2.py | marxlee/py-tools | 4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef | [
"Apache-2.0"
] | null | null | null | crawler/crawler_2.py | marxlee/py-tools | 4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef | [
"Apache-2.0"
] | null | null | null |
from urllib import request
import random
import json
# Mimic a real browser's request headers (target: a Baidu hot-search page).
url = r'https://www.baidu.com/s?cl=3&tn=baidutop10&fr=top1000&wd=%E7%9F%B3%E7%94%B0%E7%BA%AF%E4%B8%80%E6%84%9F%E6%9F%93%E6%96%B0%E5%86%A0&rsv_idx=2&rsv_dl=fyb_n_homepage&hisfilter=1'
# Pool of User-Agent strings; one is picked at random below.
agent_list = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
]
# Request headers (User-Agent is filled in right after).
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}
# Choose a random User-Agent for this run.
agent = random.choice(agent_list)
headers['User-Agent'] = agent
# 方法体
def print_url(url, header):
    """Fetch *url* with the given headers and print the decoded body.

    BUG FIX: the original used a bare ``except:`` that swallowed every
    error — including programming errors like NameError — and mislabeled
    all of them as a timeout.  Narrowed to OSError, which covers
    urllib.error.URLError/HTTPError and socket timeouts.
    """
    time_out = 1  # seconds before urlopen gives up
    req = request.Request(url=url, headers=header)
    try:
        with request.urlopen(req, timeout=time_out) as resp:
            print(resp.read().decode())
    except OSError:
        # URLError (an OSError subclass) on failure, socket.timeout
        # (also OSError) when time_out expires.
        print("超时")
    finally:
        request.urlcleanup()
def print_url_http(url, header):
    """Placeholder for a generic HTTP-verb helper.

    Intended (per the original docstring) to cover:
    GET / POST / PUT / DELETE / UPDATE / HEAD / OPTIONS

    BUG FIX: the original body called ``json.loads()`` with no argument,
    which raised TypeError on every call; the unfinished state is now
    explicit instead of failing accidentally.
    """
    raise NotImplementedError("print_url_http is not implemented yet")
def get_json_val():
    """Parse the sample payload string and print its sodar_query_id field."""
    payload = data_json_str()
    parsed = json.loads(payload)
    print(parsed['sodar_query_id'])
def data_json():
data = {"sodar_query_id":"YcqaXvPrIMSW2QTPjZeQAQ","injector_basename":"sodar2","bg_hash_basename":"r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y","bg_binary":"ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7
qZfoXV5Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaNFFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFq
pZ8E+C/p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhOxzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSp
sWY8bv8owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"}
return data
def data_json_str():
data = {"sodar_query_id": "YcqaXvPrIMSW2QTPjZeQAQ", "injector_basename": "sodar2",
"bg_hash_basename": "r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y",
"bg_binary": "ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7qZfoXV5Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaN
FFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFqpZ8E+C/p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhO
xzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSpsWY8bv8owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08
ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"}
data = json.dumps(data)
return data
# 读取本地
def load_location(path='../files/json.txt'):
    """Read a JSON document from ``path``, echo it for debugging, and return
    the parsed object.

    Args:
        path: File to read. Defaults to the original hard-coded location so
            existing zero-argument callers keep working.

    Returns:
        The Python object parsed from the file's JSON text.

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the file does not contain valid JSON.
    """
    with open(path, 'rt') as f:
        text = f.read()
    print(text)
    print(type(text))
    js = json.loads(text)
    # .get() instead of [] so documents without this key no longer raise KeyError.
    print(js.get('sodar_query_id'))
    return js
# Copy a local file's contents to another local file
def write_location(src='../files/json.txt', dst='../files/json1.txt'):
    """Copy the full text content of ``src`` into ``dst`` (overwriting it).

    Args:
        src: Source file path. Defaults to the original hard-coded location
            so existing zero-argument callers keep working.
        dst: Destination file path, created or truncated before writing.

    Raises:
        OSError: if either file cannot be opened.
    """
    with open(src, 'rt') as f:
        text = f.read()
    with open(dst, 'w') as f1:
        f1.write(text)
if __name__ == '__main__':
    # Script entry point: copy the cached JSON file to its backup location.
    # Alternative manual checks, kept for reference:
    # print_url(url=url, header=headers)
    # load_location()
    write_location()
79549384aefe9b858f38adb563e209e0b63de922 | 1,163 | py | Python | tests/test_variable_registration.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | tests/test_variable_registration.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | tests/test_variable_registration.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | import angr
def test_registration():
    """Exercise claripy BVS variable registration semantics on a SimState.

    The assertions below pin down two behaviors:
    - eternal variables (``eternal=True``) with the same key are cached, so
      repeated creation returns the identical object, even across state copies;
    - non-eternal variables are fresh objects on every creation, and copies of
      a state each get their own sub-keyed entry.
    """
    s = angr.SimState(arch='AMD64')
    # Same key + eternal=True -> the very same object is returned.
    a1 = s.solver.BVS('a', 64, key=(1,), eternal=True)
    a2 = s.solver.BVS('a', 64, key=(1,), eternal=True)
    assert a1 is a2
    # Non-eternal variable created before the state is copied.
    b1 = s.solver.BVS('b', 64, key=(2,), eternal=False)
    s1 = s.copy()
    s2 = s.copy()
    # Non-eternal creation in each copy yields pairwise-distinct objects.
    b2 = s1.solver.BVS('b', 64, key=(2,), eternal=False)
    b3 = s2.solver.BVS('b', 64, key=(2,), eternal=False)
    assert b1 is not b2
    assert b2 is not b3
    assert b1 is not b3
    # The eternal variable is still the same object in both copies.
    a3 = s1.solver.BVS('a', 64, key=(1,), eternal=True)
    a4 = s2.solver.BVS('a', 64, key=(1,), eternal=True)
    assert a2 is a3
    assert a3 is a4
    # Key (1,) maps to exactly one variable everywhere; key (2,) grew an extra
    # entry in each copy (original b1 plus the copy-local b2/b3).
    assert len(list(s.solver.get_variables(1))) == 1
    assert len(list(s1.solver.get_variables(1))) == 1
    assert len(list(s2.solver.get_variables(1))) == 1
    assert len(list(s.solver.get_variables(2))) == 1
    assert len(list(s1.solver.get_variables(2))) == 2
    assert len(list(s2.solver.get_variables(2))) == 2
    # describe_variables reports the keys an AST's variables were filed under;
    # the non-eternal key is extended with a counter suffix.
    assert list(s.solver.describe_variables(a1)) == [(1,)]
    assert list(s.solver.describe_variables(b1)) == [(2, 1)]
    assert sorted(list(s.solver.describe_variables(a1 + b1))) == [(1,), (2, 1)]
| 32.305556 | 79 | 0.604471 |
7954939966d0214e24947f21d6d71ac3487f0b66 | 31,851 | py | Python | async_hsm/__init__.py | SzeMengTan/async_hsm | 8e23a94c3e9d009536002d1469e81fd165a23e01 | [
"MIT"
] | null | null | null | async_hsm/__init__.py | SzeMengTan/async_hsm | 8e23a94c3e9d009536002d1469e81fd165a23e01 | [
"MIT"
] | null | null | null | async_hsm/__init__.py | SzeMengTan/async_hsm | 8e23a94c3e9d009536002d1469e81fd165a23e01 | [
"MIT"
] | null | null | null | import asyncio
import collections
import signal
import traceback
import typing
from copy import copy
from functools import wraps
import attr
@attr.s(auto_attribs=True)
class Catch:
    """Maps a signal name to the name of the handler method that catches it."""
    signal: str
    handler: str
@attr.s(auto_attribs=True)
class State:
    """Declarative description of a single HSM state."""
    name: str
    # Name of the parent state, or None to parent this state to Hsm.top.
    parent: typing.Optional[str]
    catches: typing.List[Catch] = attr.Factory(list)
@attr.s(auto_attribs=True)
class HsmDescription:
    """Declarative description of a whole HSM, consumed by Factory.build_hsm."""
    initial: str
    # Optional name of a setup method to call before entering the initial state.
    setup: typing.Optional[str]
    signals: typing.List[str] = attr.Factory(list)
    states: typing.List[State] = attr.Factory(list)
class Spy(object):
    """Spy is the debugging system for async_hsm.
    async_hsm contains a handful of Spy.on_*() methods
    placed at useful locations in the framework.
    It is up to a Spy driver (such as the included VcdSpy)
    to implement the Spy.on_*() methods.
    The programmer calls Spy.enable_spy(<Spy implementation class>)
    to activate the Spy system; otherwise, Spy does nothing.
    Therefore, this class is designed so that calling Spy.anything()
    is inert unless the application first calls Spy.enable_spy()
    """

    # The active Spy driver class; None means the Spy system is disabled.
    _actv_cls = None

    @staticmethod
    def enable_spy(spy_cls):
        """Sets the Spy to use the given class
        and calls its initializer.
        """
        Spy._actv_cls = spy_cls
        spy_cls.init()

    def __getattr__(self, name):
        """Returns
        1) the enable_spy static method if requested by name, or
        2) the attribute from the active class (if active class was set), or
        3) a function that swallows any arguments and does nothing.

        Idiomatic (self, name) signature; behavior is unchanged from the
        original *args form.
        """
        if name == "enable_spy":
            return Spy.enable_spy
        if Spy._actv_cls:
            return getattr(Spy._actv_cls, name)
        return lambda *x: None
# Singleton pattern:
# Replace the Spy class with an instance of itself so that attribute access
# (e.g. "from async_hsm import Spy; Spy.on_foo()") goes through __getattr__.
# This also prevents Spy() from creating a new instance and gives every
# importer the same object.
Spy = Spy()
class Signal(object):
    """An asynchronous stimulus that triggers reactions.
    A unique identifier that, along with a value, specifies an Event.
    p. 154
    """

    _registry = {}  # maps signame:str -> sigid:int
    _lookup = []    # maps sigid:int -> signame:str

    @staticmethod
    def exists(signame):
        """Returns True if signame is in the Signal registry.
        """
        return signame in Signal._registry

    @staticmethod
    def register(signame):
        """Registers the signame if it is not already registered.
        Returns the signal number for the signame.
        """
        assert type(signame) is str
        known_id = Signal._registry.get(signame)
        if known_id is not None:
            # TODO: emit warning that signal is already registered
            return known_id
        new_id = len(Signal._lookup)
        Signal._lookup.append(signame)
        Signal._registry[signame] = new_id
        Spy.on_signal_register(signame, new_id)
        return new_id

    def __getattr__(self, signame):
        """Allows attribute-style lookup of a registered signal's id."""
        assert type(signame) is str
        return Signal._registry[signame]
# Singleton pattern:
# Replace the Signal class with an instance of itself so that attribute-style
# lookups (Signal.SOME_NAME) resolve through __getattr__ to registered ids.
# This also prevents Signal() from creating a new instance.
Signal = Signal()
# Register the reserved (system) signals.
# Registration order fixes their numeric ids (0..5); these ids MUST line up
# with the positions in the Event.reserved tuple below.
Signal.register("EMPTY")  # 0
Signal.register("ENTRY")  # 1
Signal.register("EXIT")  # 2
Signal.register("INIT")  # 3
Signal.register("TERMINATE")  # 4
Signal.register("ERROR")  # 5
Event = collections.namedtuple("Event", ["signal", "value"])
Event.__doc__ = """Events are a tuple of (signal, value) that are passed from
one AHSM to another. Signals are defined in each AHSM's source code
by name, but resolve to a unique number. Values are any python value,
including containers that contain even more values. Each AHSM state
(static method) accepts an Event as the parameter and handles the event
based on its Signal."""
# Instantiate the reserved (system) events
Event.EMPTY = Event(Signal.EMPTY, None)
Event.ENTRY = Event(Signal.ENTRY, None)
Event.EXIT = Event(Signal.EXIT, None)
Event.INIT = Event(Signal.INIT, None)
Event.TERMINATE = Event(Signal.TERMINATE, None)
Event.ERROR = Event(Signal.ERROR, None)
# The order of this tuple MUST match their respective signals
Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT, Event.TERMINATE, Event.ERROR)
def state(func):
    """A decorator that identifies which methods are states.
    The presence of the async_hsm_state attr, not the value of the attr,
    determines statehood.
    The Spy debugging system uses the async_hsm_state attribute
    to determine which methods inside a class are actually states.
    Other uses of the attribute may come in the future.
    """
    @wraps(func)
    def wrapper(self, evt):
        # Delegate to the real handler, then report the call to the Spy.
        ret = func(self, evt)
        Spy.on_state_handler_called(wrapper, evt, ret)
        return ret

    # Mark the wrapper so the framework can recognize it as a state handler.
    wrapper.async_hsm_state = True
    return wrapper
class Hsm(object):
    """A Hierarchical State Machine (HSM).
    Full support for hierarchical state nesting.
    Guaranteed entry/exit action execution on arbitrary state transitions.
    Full support of nested initial transitions.
    Support for events with arbitrary parameters.
    """

    # Every state handler must return one of these values
    RET_HANDLED = 0
    RET_IGNORED = 1
    RET_TRAN = 2
    RET_SUPER = 3

    def __init__(self):
        """Sets this Hsm's current state to self.top(), the default state
        and stores the given initial state.
        """
        # self.state is the Hsm/act's current active state.
        # This instance variable references the message handler (method)
        # that will be called whenever a message is sent to this Hsm.
        # We initialize this to self.top, the default message handler
        self.state = self.top
        # Set to the state that was active when dispatch() started (for
        # introspection via Framework.get_info); None outside of dispatch.
        self.state_receiving_dispatch = None
        # The terminated flag indicates that this state machine is
        # finished. Usually set in the _exit state.
        self.terminated = False
        #
        # The publish_errors flag affects how exceptions raised in this
        # HSM are treated. The resulting Event with type Signal.ERROR
        # may be published to the framework (if publish_errors is True)
        # or placed on the FIFO of this Hsm (if publish_errors is False)
        # If the error is published, it is necessary to use the subscribe
        # method to be informed when the error occurs.
        self.publish_errors = False

    # Async_hsm differs from QP here in that we hardcode
    # the initial state to be "_initial"
    def _initial(self, event):
        """Raises a NotImplementedError to force the derived class
        to implement its own initial state.
        """
        raise NotImplementedError

    @state
    def _exit(self, event):
        """Default exit state handler that sets the terminated attribute of the
        state machine. This may be overridden in a user's HSM class. """
        sig = event.signal
        if sig == Signal.ENTRY:
            self.terminated = True
            return self.handled(event)
        return self.super(self.top)

    # Helper functions to process reserved events through the current state
    def trig(self, state_func, signal):
        """Sends the reserved event for `signal` to the given state handler."""
        return state_func(Event.reserved[signal])

    def enter(self, state_func):
        """Sends the ENTRY event to the given state handler."""
        return state_func(Event.ENTRY)

    def exit(self, state_func):
        """Sends the EXIT event to the given state handler.
        NOTE: intentionally shadows the builtin name `exit` within this class.
        """
        return state_func(Event.EXIT)

    # Other helper functions
    def handled(self, event):
        """Return value for a state handler that consumed the event."""
        return Hsm.RET_HANDLED

    def tran(self, nextState):
        """Record a transition to nextState (a handler or its name as str)."""
        self.state = getattr(self, nextState) if isinstance(nextState, str) else nextState
        return Hsm.RET_TRAN

    def super(self, superState):
        """Defer the event to superState (or to top when superState is None)."""
        if superState is None:
            self.state = self.top
        else:
            self.state = getattr(self, superState) if isinstance(superState, str) else superState
        return Hsm.RET_SUPER  # p. 158

    def run_async(self, cor):
        # Run an asynchronous task in the coroutine cor and post an ERROR
        # event if it throws an exception
        async def wrapped_cor():
            try:
                await cor
            except Exception as e:
                event = Event(Signal.ERROR, {
                    "exc": e,
                    "traceback": traceback.format_exc(),
                    "location": self.__class__.__name__,
                    "name": cor.__name__
                })
                if self.publish_errors:
                    Framework.publish(event)
                else:
                    self.postFIFO(event)

        asyncio.create_task(wrapped_cor())

    def top(self, event):
        """This is the default state handler. This handler ignores all signals except for Signal.TERMINATE and
        Signal.ERROR. These default actions can be overridden within a user-provided top level state.

        The TERMINATE signal causes a transition to the state self._exit.
        The ERROR signal does not cause a state transition, but prints a traceback message on the console.
        """
        if event.signal == Signal.TERMINATE:
            return self.tran(self._exit)
        elif event.signal == Signal.ERROR:
            print(f"Exception {event.value['exc']}\n{event.value['traceback']}")
            return Hsm.RET_HANDLED
        # All other events are quietly ignored
        return Hsm.RET_IGNORED  # p. 165

    def _perform_init_chain(self, current):
        """Act on the chain of initializations required starting from current.
        Returns the leaf state reached after all INIT transitions settle.
        """
        t = current
        while self.trig(t if t != self.top else self._initial, Signal.INIT) == Hsm.RET_TRAN:
            # The state handles the INIT message and needs to make a transition. The
            # "top" state is special in that it does not handle INIT messages, so we
            # defer to self._initial in this case
            path = []  # Trace the path back to t via superstates
            while self.state != t:
                path.append(self.state)
                self.trig(self.state, Signal.EMPTY)
            # Restore the state to the target state
            self.state = path[0]
            assert len(path) < 32  # MAX_NEST_DEPTH
            # Perform ENTRY action for each state from current to the target
            path.reverse()  # in-place
            for s in path:
                self.enter(s)
            # The target state has now to be checked to see if it responds to the INIT message
            t = path[-1]  # -1 because path was reversed
        return t

    def _perform_transition(self, source, target):
        # Handle the state transition from source to target in the HSM.
        # The lettered cases below follow the standard QP/PSiCC transition
        # taxonomy (self-transition, parent/child, shared parent, etc.).
        s, t = source, target
        path = [t]
        if s == t:  # Case (a), transition to self
            self.exit(s)
            self.enter(t)
        else:
            # Find parent of target
            self.trig(t, Signal.EMPTY)
            t = self.state  # t is now parent of target
            if s == t:  # Case (b), source is parent of target
                self.enter(path[0])
            else:
                # Find parent of source
                self.trig(s, Signal.EMPTY)
                if self.state == t:  # Case (c), source and target share a parent
                    self.exit(s)
                    self.enter(path[0])
                else:
                    if self.state == path[0]:  # Case (d), target is parent of source
                        self.exit(s)
                    else:  # Check if the source is an ancestor of the target (case (e))
                        lca_found = False
                        path.append(t)  # Populates path[1]
                        t = self.state  # t is now parent of source
                        # Find and save ancestors of target into path
                        # until we find the source or hit the top
                        self.state = path[1]
                        while self.state != self.top:
                            self.trig(self.state, Signal.EMPTY)
                            path.append(self.state)
                            assert len(path) < 32  # MAX_NEST_DEPTH
                            if self.state == s:
                                lca_found = True
                                break
                        if lca_found:  # This is case (e), enter states to get to target
                            for st in reversed(path[:-1]):
                                self.enter(st)
                        else:
                            self.exit(s)  # Exit the source for cases (f), (g), (h)
                            self.state = t  # Start at parent of the source
                            while self.state not in path:
                                # Keep exiting up into superstates until we reach the LCA.
                                # Depending on whether the EXIT signal is handled, we may also need
                                # to send the EMPTY signal to make self.state climb to the superstate.
                                if self.exit(self.state) == Hsm.RET_HANDLED:
                                    self.trig(self.state, Signal.EMPTY)
                            t = self.state
                            # Step into children until we enter the target
                            for st in reversed(path[:path.index(t)]):
                                self.enter(st)

    def init(self):
        """Transitions to the initial state. Follows any INIT transitions
        from the inital state and performs ENTRY actions as it proceeds.
        Use this to pass any parameters to initialize the state machine.
        p. 172
        """
        # The initial state MUST transition to another state
        self.state = self._perform_init_chain(self.top)

    def dispatch(self, event):
        """Dispatches the given event to this Hsm.
        Follows the application's state transitions
        until the event is handled or top() is reached
        p. 174

        Exceptions raised by state handlers are converted into a
        Signal.ERROR event (published or posted per self.publish_errors).
        """
        try:
            Spy.on_hsm_dispatch_event(event)
            # Save the current state
            t = self.state
            self.state_receiving_dispatch = t
            # Proceed to superstates if event is not handled, we wish to find the superstate
            # (if any) that does handle the event and to record the path to that state
            exit_path = []
            r = Hsm.RET_SUPER

            while r == Hsm.RET_SUPER:
                s = self.state
                exit_path.append(s)
                Spy.on_hsm_dispatch_pre(s)
                r = s(event)  # invoke state handler

            # We leave the while loop with s at the state which was able to respond
            # to the event, or to self.top if none did
            Spy.on_hsm_dispatch_post(exit_path)

            # If the state handler for s requests a transition
            if r == Hsm.RET_TRAN:
                t = self.state
                # Store target of transition
                # Exit from the current state to the state s which handles
                # the transition. We do not exit from s=exit_path[-1] itself.
                for st in exit_path[:-1]:
                    r = self.exit(st)
                    assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)
                s = exit_path[-1]
                # Transition to t through the HSM
                self._perform_transition(s, t)
                # Do initializations starting at t
                t = self._perform_init_chain(t)

            # Restore the state
            self.state = t
            self.state_receiving_dispatch = None
        except Exception as e:
            event = Event(Signal.ERROR, {"exc": e, "traceback": traceback.format_exc(), "location": self.__class__.__name__})
            if self.publish_errors:
                Framework.publish(event)
            else:
                self.postFIFO(event)
class Framework(object):
    """Framework is a composite class that holds:
    - the asyncio event loop
    - the registry of AHSMs
    - the set of TimeEvents
    - the handle to the next TimeEvent
    - the table subscriptions to events
    """

    # The event loop is accessed through the get_event_loop method. The private
    # attribute __event_loop is initialized the first time get_event_loop is called
    # and is subsequently returned by the get_event_loop method
    __event_loop = None

    # The Framework maintains a registry of Ahsms in a list.
    _ahsm_registry = []

    # The Framework maintains a dict of priorities in use
    # to prevent duplicates.
    # An Ahsm's priority is checked against this dict
    # within the Ahsm.start() method
    # when the Ahsm is added to the Framework.
    # The dict's key is the priority (integer) and the value is the Ahsm.
    _priority_dict = {}

    # The Framework maintains pending TimeEvents in a dict.
    # The TimeEvent is the key and the handle to the callback
    # is the value. This is useful for cancelling the event if
    # necessary. A time event can appear at most once within
    # this dictionary, since it cannot be scheduled while it is
    # already pending.
    #
    # A nonperiodic time event will be removed from
    # the dictionary when it expires, whereas periodic TimeEvents
    # re-enqueue themselves and update their handles whenever
    # they occur.
    _time_event_handles = {}

    # The Subscriber Table is a dictionary. The keys are signals.
    # The value for each key is a list of Ahsms that are subscribed to the
    # signal. An Ahsm may subscribe to a signal at any time during runtime.
    _subscriber_table = {}

    # The terminate event is accessed through the get_terminate_event method. The private
    # attribute __terminate_event is initialized the first time get_terminate_event is called
    # and is subsequently returned by the get_terminate_event method
    # The event is set once all the AHSMs in the framework have set their terminated attribute,
    # which is usually done in their _exit states
    __terminate_event = None

    @staticmethod
    def get_event_loop():
        # The first time this is called, we get the current event loop and assign it to the
        # private variable __event_loop. Subsequently, return this loop. Doing this allows us
        # use asyncio.run in conjunction with async_hsm, since asyncio.run creates a new
        # event loop, and needs to be run before we try to call get_event_loop
        if Framework.__event_loop is None:
            Framework.__event_loop = asyncio.get_event_loop()
            # try:
            #     Framework.__event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop())
            #     Framework.__event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop())
            #     Framework.__event_loop.add_signal_handler(29, Framework.print_info)
            # except NotImplementedError:
            #     pass
        return Framework.__event_loop

    @staticmethod
    def get_terminate_event():
        """Returns the singleton asyncio.Event used to signal framework
        shutdown, creating it on first use. The event is set once every
        registered Ahsm has terminated (see Framework.run) or when
        Framework.stop() is called.
        """
        if Framework.__terminate_event is None:
            Framework.__terminate_event = asyncio.Event()
        return Framework.__terminate_event

    @staticmethod
    def post(event, act):
        """Posts the event to the given Ahsm's event queue.
        The argument, act, is an Ahsm instance.
        """
        assert isinstance(act, Ahsm)
        act.postFIFO(event)

    @staticmethod
    def post_by_name(event, act_name):
        """Posts the event to the given Ahsm's event queue.
        The argument, act, is a string of the name of the class
        to which the event is sent. The event will post to all actors
        having the given classname.
        """
        assert type(act_name) is str
        for act in Framework._ahsm_registry:
            if act.__class__.__name__ == act_name:
                act.postFIFO(event)

    @staticmethod
    def publish(event):
        """Posts the event to the message queue of every Ahsm
        that is subscribed to the event's signal.
        """
        if event.signal in Framework._subscriber_table:
            for act in Framework._subscriber_table[event.signal]:
                act.postFIFO(event)
        # Run to completion
        Framework.get_event_loop().call_soon_threadsafe(Framework.run)

    @staticmethod
    def subscribe(signame, act):
        """Adds the given Ahsm to the subscriber table list
        for the given signal. The argument, signame, is a string of the name
        of the Signal to which the Ahsm is subscribing. Using a string allows
        the Signal to be created in the registry if it is not already.
        """
        sigid = Signal.register(signame)
        if sigid not in Framework._subscriber_table:
            Framework._subscriber_table[sigid] = []
        Framework._subscriber_table[sigid].append(act)

    @staticmethod
    def addTimeEvent(tm_event, delta):
        """Adds the TimeEvent to the collection of time events in the Framework.
        The event will fire its signal (to the TimeEvent's target Ahsm)
        after the delay, delta.
        """
        expiration = Framework.get_event_loop().time() + delta
        Framework.addTimeEventAt(tm_event, expiration)

    @staticmethod
    def addTimeEventAt(tm_event, abs_time):
        """Adds the TimeEvent to the collection of time events in the Framework.
        The event will fire its signal (to the TimeEvent's target Ahsm)
        at the given absolute time (Framework.get_event_loop().time()).
        """
        assert tm_event not in Framework._time_event_handles
        Framework._scheduleTimeEvent(tm_event, abs_time)

    @staticmethod
    def _scheduleTimeEvent(tm_event, expiration):
        """Schedule the TimeEvent using call_at
        """
        Framework._time_event_handles[tm_event] = Framework.get_event_loop().call_at(expiration, Framework.timeEventCallback,
                                                                                     tm_event, expiration)

    @staticmethod
    def removeTimeEvent(tm_event):
        """Removes the TimeEvent from the dictionary of active time events, cancelling
        it if it is pending
        """
        if tm_event in Framework._time_event_handles:
            Framework._time_event_handles[tm_event].cancel()
            del Framework._time_event_handles[tm_event]

    @staticmethod
    def timeEventCallback(tm_event, expiration):
        """The callback function for all TimeEvents.
        Posts the event to the event's target Ahsm.
        If the TimeEvent is periodic, reschedule its next occurrence.
        """
        assert tm_event in Framework._time_event_handles, ("Exp:%f _time_event_handles.keys():%s" %
                                                           (expiration, Framework._time_event_handles.keys()))

        # Remove this expired TimeEvent from the dictionary
        del Framework._time_event_handles[tm_event]

        # Post the event to the target Ahsm
        tm_event.act.postFIFO(tm_event)

        # If this is a periodic time event, schedule its next expiration
        if tm_event.interval > 0:
            Framework._scheduleTimeEvent(tm_event, expiration + tm_event.interval)

        # Run to completion
        Framework.get_event_loop().call_soon_threadsafe(Framework.run)

    @staticmethod
    def add(act):
        """Makes the framework aware of the given Ahsm.
        """
        Framework._ahsm_registry.append(act)
        assert act.priority not in Framework._priority_dict, ("Priority MUST be unique")
        Framework._priority_dict[act.priority] = act
        Spy.on_framework_add(act)

    @staticmethod
    def run():
        """Dispatches an event to the highest priority Ahsm
        until all event queues are empty (i.e. Run To Completion).
        If any exception occurs in the state handler functions called
        while performing ``dispatch``, post the ERROR event on the FIFO
        of the state machine, with information about the exception, a traceback
        and the class name in which the exception occurred, so that it can be
        dealt with appropriately.
        """
        def getPriority(x):
            return x.priority

        while True:
            allQueuesEmpty = True
            sorted_acts = sorted(Framework._ahsm_registry, key=getPriority)
            terminate = True
            for act in sorted_acts:
                if act.terminated:
                    continue
                terminate = False
                if act.has_msgs():
                    # Dispatch one event from the highest-priority non-empty
                    # queue, then restart the scan from the top.
                    event_next = act.pop_msg()
                    act.dispatch(event_next)
                    allQueuesEmpty = False
                    break
            if terminate:
                # Every registered Ahsm has terminated
                Framework.get_terminate_event().set()
            if allQueuesEmpty:
                return

    @staticmethod
    def stop():
        """EXITs all Ahsms and sets the _terminate_event flag.
        """
        # Disable the timer callback
        for tm_event in Framework._time_event_handles:
            Framework._time_event_handles[tm_event].cancel()

        # Post TERMINATE to all Ahsms so they execute their EXIT handler
        for act in Framework._ahsm_registry:
            Framework.post(Event.TERMINATE, act)

        # Run to completion so each Ahsm will process SIGTERM
        Framework.run()
        Framework.get_terminate_event().set()
        # Framework.get_event_loop().stop()
        # Spy.on_framework_stop()

    @staticmethod
    def get_info():
        """Gets the name and current state
        of each actor in the framework.
        """
        result = {}
        for act in Framework._ahsm_registry:
            if act.state_receiving_dispatch is not None:
                # A dispatch is in flight: report both the state handling the
                # event and the state that originally received it.
                result[act.__class__.__name__] = {
                    "state_handling_event": act.state.__name__,
                    "state_receiving_dispatch": act.state_receiving_dispatch.__name__
                }
            else:
                result[act.__class__.__name__] = {"state": act.state.__name__}
        return result

    @staticmethod
    def print_info():
        """Prints the name and current state
        of each actor in the framework.
        Meant to be called when ctrl+T (SIGINFO/29) is issued.
        """
        info_dict = Framework.get_info()
        for actor in info_dict:
            print(actor, info_dict[actor])

    # NOTE(review): these handlers are installed at class-definition time,
    # i.e. importing this module takes over SIGINT/SIGTERM for the process —
    # confirm this side effect on import is intended.
    signal.signal(signal.SIGINT, lambda *args: Framework.stop())
    signal.signal(signal.SIGTERM, lambda *args: Framework.stop())

    @staticmethod
    async def done():
        """Await this coroutine to wait for all state machines to terminate. This is written
        as a loop so that CTRL-C in Windows will be acted upon"""
        while True:
            if Framework.get_terminate_event().is_set():
                break
            await asyncio.sleep(0.5)
class Ahsm(Hsm):
    """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO.
    Adds a priority, message queue and methods to work with the queue.

    Messages are popped from the right end of the deque (pop_msg), so
    postFIFO's appendleft yields first-in-first-out delivery while
    postLIFO's append puts the event at the front of the line.
    """

    def start(self, priority):
        """Registers this Ahsm with the Framework at the given (unique)
        priority and runs its initial transition. Exceptions raised during
        init are converted to a Signal.ERROR event.
        """
        # must set the priority before Framework.add() which uses the priority
        self.priority = priority
        Framework.add(self)
        self.mq = collections.deque()
        try:
            self.init()
        except Exception as e:
            event = Event(Signal.ERROR, {"exc": e, "traceback": traceback.format_exc(), "location": self.__class__.__name__})
            if self.publish_errors:
                Framework.publish(event)
            else:
                self.postFIFO(event)
        # Run to completion
        Framework.get_event_loop().call_soon_threadsafe(Framework.run)

    def postLIFO(self, evt):
        # Appended at the pop end: delivered before anything already queued.
        self.mq.append(evt)

    def postFIFO(self, evt):
        # Appended at the far end: delivered after anything already queued.
        self.mq.appendleft(evt)

    def pop_msg(self, ):
        return self.mq.pop()

    def has_msgs(self, ):
        return len(self.mq) > 0
class Factory(Ahsm):
    """Builds a concrete Ahsm subclass from a declarative HsmDescription,
    synthesizing the _initial handler and one state-handler method per
    described state via build_hsm().
    """

    # Maps state name -> {signal name -> handler method name}; rebuilt by build_hsm.
    _handled_signals = {}
    # Maps state name -> parent state name (or None); rebuilt by build_hsm.
    _parents = {}

    def __init__(self):
        super().__init__()

    @classmethod
    def _add_catch(cls, state_name, signal_name, handler_name):
        """Registers handler_name as the catcher for signal_name in state_name."""
        cls._handled_signals[state_name][signal_name] = handler_name

    @classmethod
    def _build_initial(cls, initial_state, setup, signal_list):
        """Synthesizes the _initial state handler: optionally calls the named
        setup method, subscribes to each signal in signal_list, then
        transitions to initial_state.
        """
        def _initial(self, event):
            if setup:
                getattr(self, setup)()
            for sig in signal_list:
                Framework.subscribe(sig, self)
            return self.tran(initial_state)

        # copy() gives the wrapper its own function object before renaming it.
        handler = state(copy(_initial))
        handler.__name__ = "_initial"
        handler.__qualname__ = handler.__name__
        handler.__module__ = cls.__module__
        setattr(cls, "_initial", handler)

    @classmethod
    def _build_state(cls, name, parent):
        """Synthesizes a state handler method called `name` that dispatches
        caught signals to their registered handler methods and defers all
        other events to `parent`.
        """
        def state_handler(self, event):
            sig_name = Signal._lookup[event.signal]
            if sig_name in cls._handled_signals[name]:
                event_handler = getattr(self, cls._handled_signals[name][sig_name])
                ret_val = event_handler(event)
                # A None return from the catch handler falls through to super.
                if ret_val is not None:
                    return ret_val
            return self.super(parent)

        handler = state(copy(state_handler))
        handler.__name__ = name
        handler.__qualname__ = handler.__name__
        handler.__module__ = cls.__module__
        setattr(cls, name, handler)
        cls._handled_signals[name] = {}
        cls._parents[name] = parent

    @classmethod
    def build_hsm(cls, descr):
        """Builds the class's state handlers from an HsmDescription."""
        cls._handled_signals = {}
        cls._parents = {}
        # Build the _initial method
        cls._build_initial(descr.initial, descr.setup, descr.signals)
        for state in descr.states:
            cls._build_state(state.name, state.parent)
            for catch in state.catches:
                cls._add_catch(state.name, catch.signal, catch.handler)
class TimeEvent(object):
    """TimeEvent is a composite class that contains an Event.
    A TimeEvent is created by the application and added to the Framework.
    The Framework then emits the event after the given delay.
    A one-shot TimeEvent is created by calling either postAt() or postIn().
    A periodic TimeEvent is created by calling the postEvery() method.
    """

    def __init__(self, signame):
        assert type(signame) == str
        self.signal = Signal.register(signame)
        self.value = None

    def _arm(self, act, interval):
        """Records the target Ahsm and repeat interval (0 for one-shot)."""
        assert issubclass(type(act), Ahsm)
        self.act = act
        self.interval = interval

    def postAt(self, act, abs_time):
        """Posts this TimeEvent to the given Ahsm at a specified time.
        """
        self._arm(act, 0)
        Framework.addTimeEventAt(self, abs_time)

    def postIn(self, act, delta):
        """Posts this TimeEvent to the given Ahsm after the time delta.
        """
        self._arm(act, 0)
        Framework.addTimeEvent(self, delta)

    def postEvery(self, act, delta):
        """Posts this TimeEvent to the given Ahsm after the time delta
        and every time delta thereafter until disarmed.
        """
        self._arm(act, delta)
        Framework.addTimeEvent(self, delta)

    def disarm(self):
        """Removes this TimeEvent from the Framework's active time events.
        """
        self.act = None
        Framework.removeTimeEvent(self)
7954957285f0eed5dd7b9c5878093d9db615637b | 6,793 | py | Python | frontera/core/messagebus.py | buildfail/frontera | 84f9e1034d2868447db88e865596c0fbb32e70f6 | [
"BSD-3-Clause"
] | 1,267 | 2015-04-15T04:47:12.000Z | 2022-03-29T07:55:15.000Z | frontera/core/messagebus.py | buildfail/frontera | 84f9e1034d2868447db88e865596c0fbb32e70f6 | [
"BSD-3-Clause"
] | 316 | 2015-04-14T21:28:26.000Z | 2021-05-31T05:31:15.000Z | frontera/core/messagebus.py | buildfail/frontera | 84f9e1034d2868447db88e865596c0fbb32e70f6 | [
"BSD-3-Clause"
] | 250 | 2015-04-20T07:15:10.000Z | 2022-03-28T15:17:15.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class BaseStreamConsumer(object):
    """Interface for a message-stream consumer."""

    @abstractmethod
    def get_messages(self, timeout=0.1, count=1):
        """
        Returns ``count`` messages from stream, if they are available and the operation fits within
        the timeout. If they aren't available, tries to get them for ``timeout`` seconds.

        :param timeout: float, time in seconds
        :param count: int, number of messages
        :return: generator with raw messages
        """
        raise NotImplementedError

    @abstractmethod
    def get_offset(self, partition_id):
        """
        Returns the consumer offset.

        :param partition_id: int
        :return: int consumer offset
        """
        raise NotImplementedError

    def close(self):
        """
        Performs necessary cleanup and closes the consumer.

        :return: none
        """
        pass
@six.add_metaclass(ABCMeta)
class BaseStreamProducer(object):
    """Interface for a message-stream producer."""

    @abstractmethod
    def send(self, key, *messages):
        """
        Sends messages to the stream.

        :param key: str key used for partitioning, None for non-keyed channels
        :param *messages: encoded message(s)
        """
        raise NotImplementedError

    @abstractmethod
    def flush(self):
        """
        Flushes all internal buffers.

        :return: nothing
        """
        raise NotImplementedError

    def get_offset(self, partition_id):
        """
        Returns the producer offset for a partition. Raises KeyError if the partition isn't
        available or doesn't exist.

        Returns None if not applicable to the current implementation.

        :param partition_id: int
        :return: int producer offset
        """
        raise NotImplementedError

    def close(self):
        """
        Performs all necessary cleanup and closes the producer.

        :return: none
        """
        pass
@six.add_metaclass(ABCMeta)
class BaseSpiderLogStream(object):
    """
    Spider Log Stream base class. This stream transfers results from spiders to Strategy and DB workers. Any producer
    can write to any partition of this stream. Consumers can be bound to a specific partition (SW worker) or not
    bound (DB worker) to any partition.
    """

    @abstractmethod
    def producer(self):
        """
        Creates/returns a new producer for the spider log. Producing is done by using FingerprintPartitioner.

        :return: BaseStreamProducer instance
        """
        raise NotImplementedError

    @abstractmethod
    def consumer(self, partition_id, type):
        """
        Creates/returns a consumer of the exact type, bound to a specific partition.

        :param partition_id: int
        :param type: consumer type, can be either "sw" or "db"
        :return: BaseStreamConsumer instance assigned to given partition_id
        """
        raise NotImplementedError
@six.add_metaclass(ABCMeta)
class BaseScoringLogStream(object):
    """
    Scoring log stream base class. This stream transfers score and scheduling information from Strategy workers to
    DB workers. This type of stream doesn't require any partitioning.
    """

    @abstractmethod
    def consumer(self):
        """
        :return: BaseStreamConsumer instance
        """
        raise NotImplementedError

    @abstractmethod
    def producer(self):
        """
        :return: BaseStreamProducer instance
        """
        raise NotImplementedError
@six.add_metaclass(ABCMeta)
class BaseStatsLogStream(object):
    """
    Stats log stream base class. This stream transfers stats metrics from workers and spiders to external
    data sources. This type of stream doesn't require any partitioning.
    """

    @abstractmethod
    def consumer(self):
        """
        :return: BaseStreamConsumer instance
        """
        raise NotImplementedError

    @abstractmethod
    def producer(self):
        """
        :return: BaseStreamProducer instance
        """
        raise NotImplementedError
@six.add_metaclass(ABCMeta)
class BaseSpiderFeedStream(object):
    """
    Spider Feed Stream base class. This stream transfers new batches from the DB worker to spiders. Every consumer is
    strictly bound to a specific partition, and the producer can write to any partition. This class also has methods
    for reporting busy/available partitions. The DB worker pushes new batches only to available partitions.
    """

    @abstractmethod
    def consumer(self, partition_id):
        """
        Creates/returns a spider feed consumer object.

        :param partition_id: int
        :return: BaseStreamConsumer instance assigned to given partition_id
        """
        raise NotImplementedError

    @abstractmethod
    def producer(self):
        """
        Creates/returns a spider feed producer object. This producer is meant to use Crc32NamePartitioner
        (separating the feed by hosts, so each host will be downloaded by at most one spider).

        :return: BaseStreamProducer instance
        """
        raise NotImplementedError

    @abstractmethod
    def available_partitions(self):
        """
        Returns an iterable of available (ready for processing new batches) partitions.

        :return: iterable of ints
        """
        raise NotImplementedError

    def mark_ready(self, partition_id):
        """
        Marks a partition as ready/available for receiving new batches.

        :param partition_id: int
        :return: nothing
        """
        pass

    def mark_busy(self, partition_id):
        """
        Marks a partition as busy, meaning the spider assigned to it is still processing previous batches.

        :param partition_id: int
        :return: nothing
        """
        pass
@six.add_metaclass(ABCMeta)
class BaseMessageBus(object):
    """
    Top-level message bus abstraction encapsulating the bus context. Acts as a
    factory for each of the stream-specific objects.
    """
    @abstractmethod
    def spider_log(self):
        """
        Create or return the spider log stream.
        :return: instance of SpiderLogStream
        """
        raise NotImplementedError
    @abstractmethod
    def scoring_log(self):
        """
        Create or return the scoring log stream.
        :return: instance of ScoringLogStream
        """
        raise NotImplementedError
    @abstractmethod
    def spider_feed(self):
        """
        Create or return the spider feed stream.
        :return: instance of SpiderFeedStream
        """
        raise NotImplementedError
    @abstractmethod
    def stats_log(self):
        """
        Create or return the stats log stream.
        :return: instance of StatsLogStream
        """
        raise NotImplementedError
79549644ea03575f84d08ba2f114ae60df2a14da | 6,751 | py | Python | tensorflow/compiler/tests/xla_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 384 | 2017-02-21T18:38:04.000Z | 2022-02-22T07:30:25.000Z | tensorflow/compiler/tests/xla_test.py | ChenAugustus/tensorflow | 5828e285209ff8c3d1bef2e4bd7c55ca611080d5 | [
"Apache-2.0"
] | 15 | 2017-03-01T20:18:43.000Z | 2020-05-07T10:33:51.000Z | tensorflow/compiler/tests/xla_test.py | ChenAugustus/tensorflow | 5828e285209ff8c3d1bef2e4bd7c55ca611080d5 | [
"Apache-2.0"
] | 81 | 2017-02-21T19:31:19.000Z | 2022-02-22T07:30:24.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definition of XLA test case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import random
import re
import numpy as np
from tensorflow.contrib.compiler import jit
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
FLAGS = flags.FLAGS
# Command-line flags selecting the device, the dtypes to exercise and an
# optional manifest of tests to skip.
flags.DEFINE_string('test_device', None,
                    'Tensorflow device on which to place operators under test')
flags.DEFINE_string('types', None, 'Types to test. Comma-separated list.')
flags.DEFINE_string('disabled_manifest', None,
                    'Path to a file with a list of tests that should not run.')
class XLATestCase(test.TestCase):
  """XLA test cases are parameterized test cases.

  The --test_device flag selects the device under test, --types selects the
  dtypes to exercise, and --disabled_manifest optionally points at a file of
  regex patterns (one per line, '#' comments allowed) naming tests to skip.
  """

  def __init__(self, method_name='runTest'):
    super(XLATestCase, self).__init__(method_name)
    self.device = FLAGS.test_device
    self.has_custom_call = (self.device == 'XLA_CPU')
    # Parse the requested dtypes and pre-split them into integer/float groups,
    # both as TF dtypes and as their numpy equivalents.
    self.all_tf_types = [
        dtypes.as_dtype(types_pb2.DataType.Value(name))
        for name in FLAGS.types.split(',')
    ]
    self.int_tf_types = [
        dtype for dtype in self.all_tf_types if dtype.is_integer
    ]
    self.float_tf_types = [
        dtype for dtype in self.all_tf_types if dtype.is_floating
    ]
    self.numeric_tf_types = self.int_tf_types + self.float_tf_types
    self.all_types = [dtype.as_numpy_dtype for dtype in self.all_tf_types]
    self.int_types = [dtype.as_numpy_dtype for dtype in self.int_tf_types]
    self.float_types = [dtype.as_numpy_dtype for dtype in self.float_tf_types]
    self.numeric_types = self.int_types + self.float_types
    # Parse the manifest file, if any, into a regex identifying tests to
    # disable
    self.disabled_regex = None
    if FLAGS.disabled_manifest is not None:
      comments_re = re.compile('#.*$')
      # Context manager ensures the file is closed even if parsing raises.
      with open(FLAGS.disabled_manifest, 'r') as manifest_file:
        lines = manifest_file.read().splitlines()
      lines = [comments_re.sub('', l).strip() for l in lines]
      # Drop empty entries: an empty alternative in the joined regex would
      # match every test name and silently disable the entire suite.
      lines = [l for l in lines if l]
      if lines:
        self.disabled_regex = re.compile('|'.join(lines))

  def setUp(self):
    """Skips tests named in the disabled manifest and seeds the RNGs."""
    name = '{}.{}'.format(type(self).__name__, self._testMethodName)
    if self.disabled_regex is not None and self.disabled_regex.match(name):
      logging.info('Disabled test case: %s', name)
      self.skipTest('{} is disabled by manifest.'.format(name))
      return
    logging.info('Start test case: %s', name)
    # Deterministic seeding so test results are reproducible.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)

  def tearDown(self):
    logging.info('End test case: %s', self._testMethodName)

  @contextlib.contextmanager
  def test_session(self):
    """Custom implementation of test_session() for XLA tests.

    We override the standard Tensorflow test_session() since it is too
    specific to CPU and GPU tests. In particular, we want to disable soft
    placement and explicitly assign ops to devices under test.

    Yields:
      A session to use when running a test case.
    """
    graph = ops.Graph()
    with session.Session(graph=graph) as sess, graph.as_default():
      yield sess

  @contextlib.contextmanager
  def test_scope(self):
    """Test scope that runs tests on a Tensorflow/XLA device.

    Uses a compilation_scope() to mark operators to compile.

    Yields:
      A scope to apply to the operators under test.
    """
    with ops.device('device:{}:0'.format(self.device)):
      yield
def Benchmark(tf_bench,
              builder_fn,
              use_xla_jit,
              device,
              separate_compiled_gradients=False):
  """Build a graph and run benchmarks against it, with or without XLA.

  Args:
    tf_bench: An instance of tf.test.Benchmark, used to run the benchmark.
    builder_fn: A function that builds a graph when invoked, and returns
        (name, fetches), where name is the name of the test, and fetches
        is a list of tensors to fetch as output.
    use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF.
    device: The tensorflow device to run on, e.g. "cpu", "gpu".
    separate_compiled_gradients: If true put each gradient subgraph into a
      separate compilation scope. This gives fine-grained control over which
      portions of the graph will be compiled as a single unit. Compiling
      gradients separately may yield better performance for some graphs.
      The scope is named based on the scope of the forward computation as well
      as the name of the gradients. As a result, the gradients will be compiled
      in a scope that is separate from both the forward computation, and from
      other gradients.
  """
  with ops.Graph().as_default():
    name = None
    targets = []
    with ops.device(device):
      fetches = []
      jit_scope = jit.experimental_jit_scope
      # The jit scope must wrap the graph construction so that the builder's
      # ops are (or are not) marked for XLA compilation.
      with jit_scope(
          compile_ops=use_xla_jit,
          separate_compiled_gradients=separate_compiled_gradients):
        name, fetches = builder_fn()

      # We only want to benchmark the operations themselves, and not the data
      # transfer of the result(s).  Non-compiled identity ops ensure XLA
      # doesn't know we're dropping the results, otherwise it might compile
      # away the entire computation.
      for fetch in fetches:
        targets.append(array_ops.identity(fetch).op)

    config = config_pb2.ConfigProto(allow_soft_placement=True)
    with session.Session(config=config) as sess:
      sess.run(variables.global_variables_initializer())
      xla = 'xla_' if use_xla_jit else ''
      # Benchmark name encodes both the test name and whether XLA was used.
      tf_bench.run_op_benchmark(
          sess, targets, name='%s_%s%s' % (name, xla, device))
| 38.798851 | 80 | 0.708636 |
7954973da0f85cb4d53f84fa524162dd7c2a5e38 | 844 | py | Python | buttoncontroller.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | buttoncontroller.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | buttoncontroller.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | from pyjon.events import EventDispatcher
# import RPi.GPIO as gpio
import digitalio
from adafruit_debouncer import Debouncer
#https://pypi.org/project/pyjon.events/
class ButtonController(metaclass=EventDispatcher):
    """Debounced GPIO push button that emits BUTTON_EVENT with its id."""

    BUTTON_EVENT = "ButtonController_buttonEvent"

    def __init__(self, pin, id):
        """Configure the given board pin as a pulled-up digital input."""
        self.id = id
        self.pin = pin
        io = digitalio.DigitalInOut(self.pin)
        io.direction = digitalio.Direction.INPUT
        io.pull = digitalio.Pull.UP
        self.button = io
        # 100 ms debounce window on the raw input.
        self.switch = Debouncer(self.button, interval=0.1)

    def update(self):
        """Poll the debouncer; emit BUTTON_EVENT on a rising edge."""
        self.switch.update()
        if not self.switch.rose:
            return
        # Rising edge on a pulled-up input, i.e. the button was released.
        self.emit_event(ButtonController.BUTTON_EVENT, self.id)
| 31.259259 | 80 | 0.680095 |
79549749dbb659f860f89bf99012373b9e1818f1 | 524 | py | Python | dandeliondiary/household/migrations/0008_auto_20160904_2329.py | amberdiehl/dandeliondiary_project | e9bace5bd7980def6ca763840ab5b38f1e05cd3d | [
"FSFAP"
] | null | null | null | dandeliondiary/household/migrations/0008_auto_20160904_2329.py | amberdiehl/dandeliondiary_project | e9bace5bd7980def6ca763840ab5b38f1e05cd3d | [
"FSFAP"
] | 6 | 2020-04-29T23:54:15.000Z | 2022-03-11T23:25:24.000Z | dandeliondiary/household/migrations/0008_auto_20160904_2329.py | amberdiehl/dandeliondiary_project | e9bace5bd7980def6ca763840ab5b38f1e05cd3d | [
"FSFAP"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-04 23:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the indicate_children and indicate_pets
    # columns from the rvhousehold table.
    dependencies = [
        ('household', '0007_auto_20160904_2328'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='rvhousehold',
            name='indicate_children',
        ),
        migrations.RemoveField(
            model_name='rvhousehold',
            name='indicate_pets',
        ),
    ]
| 21.833333 | 49 | 0.604962 |
7954975553c85ad93d5f5c50cb93ede8d5dc7d98 | 2,648 | py | Python | pythran/tests/euler/euler41.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/euler/euler41.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/euler/euler41.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | #runas solve()
#unittest.skip recursive generator
#pythran export solve()
''' From O'Reilly's Python Cookbook '''
def _combinators(_handle, items, n):
if n==0:
yield []
return
for i, item in enumerate(items):
this_one = [ item ]
for cc in _combinators(_handle, _handle(items, i), n-1):
yield this_one + cc
def combinations(items, n):
    """Yield every ordered selection of n distinct items."""
    def drop_nth(seq, i):
        return seq[:i] + seq[i + 1:]
    return _combinators(drop_nth, items, n)
def uniqueCombinations(items, n):
    """Yield every selection of n distinct items where order is irrelevant."""
    def tail_after(seq, i):
        return seq[i + 1:]
    return _combinators(tail_after, items, n)
def selections(items, n):
    """Yield every length-n selection, repeats allowed; order matters."""
    def unchanged(seq, i):
        return seq
    return _combinators(unchanged, items, n)
def permutations(items):
    """Yield every ordering of the full list of items."""
    return combinations(items, len(items))
def solve():
    '''
    We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.
    What is the largest n-digit pandigital prime that exists?
    '''
    # Seed primes: _isprime trial-divides by this list, so it must start
    # non-empty and _refresh() must have covered sqrt(n) before testing n.
    prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23]

    def _isprime(n):
        ''' Raw primality check via trial division by known primes up to sqrt(n).
        Assumes prime_list already contains every prime up to sqrt(n). '''
        if n < 2:
            return False
        for prime in prime_list:
            if prime * prime > n:      # no factor found up to sqrt(n)
                break
            if not n % prime:
                return False
        return True

    def _refresh(x):
        ''' Extends prime_list with every prime up to x. '''
        lastn = prime_list[-1]
        while lastn <= x:              # keep working until we've got up to x
            lastn += 1
            if _isprime(lastn):
                prime_list.append(lastn)

    # 8- and 9-digit pandigitals have digit sums 36 and 45, hence are divisible
    # by 3; so the answer has at most 7 digits and primes up to sqrt(7654321)
    # suffice for trial division.
    _refresh(2766)
    # Walk permutations of the digits 7..1 in decreasing order, so the first
    # pandigital prime found is the largest one.
    for perm in permutations(range(7, 0, -1)):
        num = 0
        for digit in perm:
            num = num * 10 + digit
        if _isprime(num):
            return num
| 34.842105 | 169 | 0.586858 |
7954975799913846ac8c9e749b18697111341cdc | 36,944 | py | Python | plugin/core/types.py | theLine/LSP | b0c1045862601ee6e6fd3250d3a441d46a6e8d7c | [
"MIT"
] | null | null | null | plugin/core/types.py | theLine/LSP | b0c1045862601ee6e6fd3250d3a441d46a6e8d7c | [
"MIT"
] | null | null | null | plugin/core/types.py | theLine/LSP | b0c1045862601ee6e6fd3250d3a441d46a6e8d7c | [
"MIT"
] | null | null | null | from .collections import DottedDict
from .file_watcher import FileWatcherEventType
from .logging import debug, set_debug_logging
from .protocol import TextDocumentSyncKindNone
from .typing import Any, Optional, List, Dict, Generator, Callable, Iterable, Union, Set, Tuple, TypedDict, TypeVar
from .typing import cast
from .url import filename_to_uri
from .url import uri_to_filename
from threading import RLock
from wcmatch.glob import BRACE
from wcmatch.glob import globmatch
from wcmatch.glob import GLOBSTAR
import contextlib
import os
import socket
import sublime
import time
import urllib.parse
TCP_CONNECT_TIMEOUT = 5 # seconds
FEATURES_TIMEOUT = 300 # milliseconds
# Regexes used by the diagnostics panel to recognize file headers
# ("path:" lines) and "row:col" location lines respectively.
PANEL_FILE_REGEX = r"^(?!\s+\d+:\d+)(.*)(:)$"
PANEL_LINE_REGEX = r"^\s+(\d+):(\d+)"
# Shape of a file-watcher configuration; every key is optional.
FileWatcherConfig = TypedDict("FileWatcherConfig", {
    "pattern": Optional[str],
    "events": Optional[List[FileWatcherEventType]],
    "ignores": Optional[List[str]],
}, total=False)
def basescope2languageid(base_scope: str) -> str:
    """Map a Sublime Text base scope to an LSP language ID.

    The mapping lives in language-ids.sublime-settings; when no explicit entry
    exists, the last dotted component of the scope is used as a fallback.
    """
    fallback = base_scope.split(".")[-1]
    mapping = sublime.load_settings("language-ids.sublime-settings")
    language_id = mapping.get(base_scope, fallback)
    return language_id if isinstance(language_id, str) else ""
@contextlib.contextmanager
def runtime(token: str) -> Generator[None, None, None]:
    """Context manager that logs the wrapped block's wall-clock time in µs,
    tagged with the given token (only on normal, non-raising exit)."""
    started = time.time()
    yield
    elapsed_us = int((time.time() - started) * 1000000)
    debug(token, "running time:", elapsed_us, "μs")
T = TypeVar("T")


def diff(old: Iterable[T], new: Iterable[T]) -> Tuple[Set[T], Set[T]]:
    """
    Return a tuple of (added, removed) items
    """
    # Avoid copying when the caller already handed us sets.
    before = old if isinstance(old, set) else set(old)
    after = new if isinstance(new, set) else set(new)
    return after - before, before - after
def debounced(f: Callable[[], Any], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
              async_thread: bool = False) -> None:
    """
    Schedule a function to possibly run later, on the async or the main thread.

    :param f: The function to possibly run. Its return value is discarded.
    :param timeout_ms: Delay in milliseconds before the attempt is made.
    :param condition: Evaluated just before running; f runs only if it is True.
    :param async_thread: True runs f on the async worker thread, False on the
                         main thread.
    """
    def attempt() -> None:
        if condition():
            f()

    schedule = sublime.set_timeout_async if async_thread else sublime.set_timeout
    schedule(attempt, timeout_ms)
class SettingsRegistration:
    """Registers an on-change callback on a sublime.Settings object and
    unregisters it again when this wrapper is garbage-collected."""

    __slots__ = ("_settings",)

    def __init__(self, settings: sublime.Settings, on_change: Callable[[], None]) -> None:
        self._settings = settings
        settings.add_on_change("LSP", on_change)

    def __del__(self) -> None:
        self._settings.clear_on_change("LSP")
class Debouncer:
    """Schedules callbacks so that a newer debounce() call supersedes any
    still-pending older one; cancel_pending() invalidates them all."""

    def __init__(self) -> None:
        self._current_id = -1
        self._next_id = 0
        self._current_id_lock = RLock()

    def debounce(self, f: Callable[[], None], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
                 async_thread: bool = False) -> None:
        """
        Possibly run a function at a later point in time, either on the async
        thread or on the main thread.

        :param f: The function to possibly run
        :param timeout_ms: Delay in milliseconds before the attempt is made
        :param condition: Evaluated just before running; f runs only if True
        :param async_thread: True runs f on the async worker thread, False on
                             the main thread
        """
        def attempt(ticket: int) -> None:
            with self._current_id_lock:
                # A newer debounce() or cancel_pending() invalidated us.
                if ticket != self._current_id:
                    return
            if condition():
                f()

        schedule = sublime.set_timeout_async if async_thread else sublime.set_timeout
        with self._current_id_lock:
            ticket = self._current_id = self._next_id
            self._next_id += 1
        schedule(lambda: attempt(ticket), timeout_ms)

    def cancel_pending(self) -> None:
        """Invalidate every callback scheduled so far."""
        with self._current_id_lock:
            self._current_id = -1
def read_dict_setting(settings_obj: sublime.Settings, key: str, default: dict) -> dict:
    """Read key from the settings object; fall back to default unless the
    stored value is a dict."""
    value = settings_obj.get(key)
    if isinstance(value, dict):
        return value
    return default
def read_list_setting(settings_obj: sublime.Settings, key: str, default: list) -> list:
    """Read key from the settings object; fall back to default unless the
    stored value is a list."""
    value = settings_obj.get(key)
    if isinstance(value, list):
        return value
    return default
class Settings:
    """Typed snapshot of the global LSP settings, refreshed via update()."""
    # This is only for mypy
    diagnostics_additional_delay_auto_complete_ms = None # type: int
    diagnostics_delay_ms = None # type: int
    diagnostics_gutter_marker = None # type: str
    diagnostics_panel_include_severity_level = None # type: int
    disabled_capabilities = None # type: List[str]
    document_highlight_style = None # type: str
    inhibit_snippet_completions = None # type: bool
    inhibit_word_completions = None # type: bool
    log_debug = None # type: bool
    log_max_size = None # type: int
    log_server = None # type: List[str]
    lsp_code_actions_on_save = None # type: Dict[str, bool]
    lsp_format_on_save = None # type: bool
    on_save_task_timeout_ms = None # type: int
    only_show_lsp_completions = None # type: bool
    popup_max_characters_height = None # type: int
    popup_max_characters_width = None # type: int
    show_code_actions = None # type: str
    show_code_lens = None # type: str
    show_code_actions_in_hover = None # type: bool
    show_diagnostics_count_in_view_status = None # type: bool
    show_diagnostics_highlights = None # type: bool
    show_diagnostics_in_view_status = None # type: bool
    show_diagnostics_panel_on_save = None # type: int
    show_diagnostics_severity_level = None # type: int
    show_references_in_quick_panel = None # type: bool
    show_symbol_action_links = None # type: bool
    show_view_status = None # type: bool
    def __init__(self, s: sublime.Settings) -> None:
        """Populate all attributes from the given sublime.Settings object."""
        self.update(s)
    def update(self, s: sublime.Settings) -> None:
        """Re-read every setting, applying defaults and legacy fallbacks."""
        # r() assigns the setting only when its type matches the default's type.
        def r(name: str, default: Union[bool, int, str, list, dict]) -> None:
            val = s.get(name)
            setattr(self, name, val if isinstance(val, default.__class__) else default)
        r("diagnostics_additional_delay_auto_complete_ms", 0)
        r("diagnostics_delay_ms", 0)
        r("diagnostics_gutter_marker", "dot")
        r("diagnostics_panel_include_severity_level", 4)
        r("disabled_capabilities", [])
        r("document_highlight_style", "underline")
        r("log_debug", False)
        r("log_max_size", 8 * 1024)
        r("lsp_code_actions_on_save", {})
        r("lsp_format_on_save", False)
        r("on_save_task_timeout_ms", 2000)
        r("only_show_lsp_completions", False)
        r("popup_max_characters_height", 1000)
        r("popup_max_characters_width", 120)
        r("show_code_actions", "annotation")
        r("show_code_lens", "annotation")
        r("show_code_actions_in_hover", True)
        r("show_diagnostics_count_in_view_status", False)
        r("show_diagnostics_in_view_status", True)
        r("show_diagnostics_highlights", True)
        r("show_diagnostics_panel_on_save", 2)
        r("show_diagnostics_severity_level", 2)
        r("show_references_in_quick_panel", False)
        r("show_symbol_action_links", False)
        r("show_view_status", True)
        # Backwards-compatible with the bool setting
        log_server = s.get("log_server")
        if isinstance(log_server, bool):
            self.log_server = ["panel"] if log_server else []
        elif isinstance(log_server, list):
            self.log_server = log_server
        else:
            self.log_server = []
        # Backwards-compatible with the bool setting
        auto_show_diagnostics_panel = s.get("auto_show_diagnostics_panel")
        if isinstance(auto_show_diagnostics_panel, bool):
            if not auto_show_diagnostics_panel:
                self.show_diagnostics_panel_on_save = 0
        elif isinstance(auto_show_diagnostics_panel, str):
            if auto_show_diagnostics_panel == "never":
                self.show_diagnostics_panel_on_save = 0
        # Backwards-compatible with "only_show_lsp_completions"
        only_show_lsp_completions = s.get("only_show_lsp_completions")
        if isinstance(only_show_lsp_completions, bool):
            self.inhibit_snippet_completions = only_show_lsp_completions
            self.inhibit_word_completions = only_show_lsp_completions
        else:
            r("inhibit_snippet_completions", False)
            r("inhibit_word_completions", True)
        # Backwards-compatible with "diagnostics_highlight_style"
        diagnostics_highlight_style = s.get("diagnostics_highlight_style")
        if isinstance(diagnostics_highlight_style, str):
            if not diagnostics_highlight_style:
                self.show_diagnostics_highlights = False
        # Backwards-compatible with "code_action_on_save_timeout_ms"
        code_action_on_save_timeout_ms = s.get("code_action_on_save_timeout_ms")
        if isinstance(code_action_on_save_timeout_ms, int):
            self.on_save_task_timeout_ms = code_action_on_save_timeout_ms
        # Keep the global debug flag in sync with the new value.
        set_debug_logging(self.log_debug)
    def document_highlight_style_region_flags(self) -> Tuple[int, int]:
        """Return the (underline, stippled) region flag pairs corresponding to
        the configured document_highlight_style."""
        if self.document_highlight_style == "fill":
            return sublime.DRAW_NO_OUTLINE, sublime.DRAW_NO_OUTLINE
        elif self.document_highlight_style == "stippled":
            return sublime.DRAW_NO_FILL, sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE # noqa: E501
        else:
            return sublime.DRAW_NO_FILL, sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE
class ClientStates:
    """Lifecycle states of a language-server session."""
    STARTING = 0
    READY = 1
    STOPPING = 2
class DocumentFilter:
    """
    A document filter denotes a document through properties like language, scheme or pattern. An example is a filter
    that applies to TypeScript files on disk. Another example is a filter that applies to JSON files with name
    package.json:

        { "language": "typescript", scheme: "file" }
        { "language": "json", "pattern": "**/package.json" }

    Sublime Text has no notion of a language ID, so a global translation map from
    language IDs to selectors is consulted; patterns are matched with wcmatch.
    """

    __slots__ = ("language", "scheme", "pattern")

    def __init__(
        self,
        language: Optional[str] = None,
        scheme: Optional[str] = None,
        pattern: Optional[str] = None
    ) -> None:
        self.scheme = scheme
        self.pattern = pattern
        self.language = language

    def __call__(self, view: sublime.View) -> bool:
        """Does this filter match the view? An empty filter matches any view."""
        if self.language:
            syntax = view.syntax()
            if not syntax:
                return False
            if basescope2languageid(syntax.scope) != self.language:
                return False
        if self.scheme:
            uri = view.settings().get("lsp_uri")
            if isinstance(uri, str):
                if urllib.parse.urlparse(uri).scheme != self.scheme:
                    return False
        if self.pattern:
            filename = view.file_name() or ""
            if not globmatch(filename, self.pattern, flags=GLOBSTAR | BRACE):
                return False
        return True
class DocumentSelector:
    """
    A list of DocumentFilters; a view matches when any single filter matches.
    """

    __slots__ = ("filters",)

    def __init__(self, document_selector: List[Dict[str, Any]]) -> None:
        self.filters = [DocumentFilter(**entry) for entry in document_selector]

    def __bool__(self) -> bool:
        return len(self.filters) > 0

    def matches(self, view: sublime.View) -> bool:
        """Does this selector match the view? A selector with no filters matches all views."""
        if not self.filters:
            return True
        return any(document_filter(view) for document_filter in self.filters)
# method -> (capability dotted path, optional registration dotted path)
# these are the EXCEPTIONS. The general rule is: method foo/bar --> (barProvider, barProvider.id)
# A None registration path means "derive it as <capability_path>.id" (see
# method_to_capability below).
_METHOD_TO_CAPABILITY_EXCEPTIONS = {
    'workspace/symbol': ('workspaceSymbolProvider', None),
    'workspace/didChangeWorkspaceFolders': ('workspace.workspaceFolders',
                                            'workspace.workspaceFolders.changeNotifications'),
    'textDocument/didOpen': ('textDocumentSync.didOpen', None),
    'textDocument/didClose': ('textDocumentSync.didClose', None),
    'textDocument/didChange': ('textDocumentSync.change', None),
    'textDocument/didSave': ('textDocumentSync.save', None),
    'textDocument/willSave': ('textDocumentSync.willSave', None),
    'textDocument/willSaveWaitUntil': ('textDocumentSync.willSaveWaitUntil', None),
    'textDocument/formatting': ('documentFormattingProvider', None),
    'textDocument/documentColor': ('colorProvider', None)
}  # type: Dict[str, Tuple[str, Optional[str]]]
def method_to_capability(method: str) -> Tuple[str, str]:
    """
    Given a method, returns the corresponding capability path, and the associated path to stash the registration key.

    Examples:

        textDocument/definition --> (definitionProvider, definitionProvider.id)
        textDocument/references --> (referencesProvider, referencesProvider.id)
        textDocument/didOpen --> (textDocumentSync.didOpen, textDocumentSync.didOpen.id)
    """
    capability_path, registration_path = _METHOD_TO_CAPABILITY_EXCEPTIONS.get(method, (None, None))
    if capability_path is None:
        # General rule: foo/bar --> barProvider
        capability_path = "{}Provider".format(method.split('/')[1])
    if registration_path is None:
        # This path happens to coincide with the StaticRegistrationOptions' id,
        # which is on purpose: if a server made a "registration" via the
        # initialize response, it can call client/unregisterCapability at a
        # later date, and the capability will pop from the capabilities dict.
        registration_path = capability_path + ".id"
    return capability_path, registration_path
def normalize_text_sync(textsync: Union[None, int, Dict[str, Any]]) -> Dict[str, Any]:
    """
    Convert legacy textDocumentSync capability formats into the modern
    structured dict form. Unrecognized input yields an empty dict.
    """
    result = {}  # type: Dict[str, Any]
    if isinstance(textsync, int):
        # A bare int is the oldest format: just the change sync kind.
        result["textDocumentSync"] = {
            "didOpen": {},
            "save": {},
            "didClose": {},
            "change": {"syncKind": textsync}
        }
    elif isinstance(textsync, dict):
        normalized = {}  # type: Dict[str, Any]
        change = textsync.get("change")
        if isinstance(change, int):
            normalized["change"] = {"syncKind": change}
        elif isinstance(change, dict):
            normalized["change"] = change

        def copy_if_set(key: str) -> None:
            value = textsync.get(key)
            if value is True:
                normalized[key] = {}
            elif isinstance(value, dict):
                normalized[key] = value

        open_close = textsync.get("openClose")
        if isinstance(open_close, bool):
            # A boolean openClose overrides any didOpen/didClose keys.
            if open_close:
                normalized["didOpen"] = {}
                normalized["didClose"] = {}
        else:
            copy_if_set("didOpen")
            copy_if_set("didClose")
        for key in ("willSave", "willSaveWaitUntil", "save"):
            copy_if_set(key)
        result["textDocumentSync"] = normalized
    return result
class Capabilities(DottedDict):
    """
    Maintains static and dynamic capabilities.

    Static capabilities arrive in the response to the initialize request
    (Client -> Server). Dynamic capabilities may be registered at any moment
    with client/registerCapability and client/unregisterCapability
    (Server -> Client).
    """

    def register(
        self,
        registration_id: str,
        capability_path: str,
        registration_path: str,
        options: Dict[str, Any]
    ) -> None:
        previous_id = self.get(registration_path)
        if isinstance(previous_id, str):
            debug("{} is already registered at {} with ID {}, overwriting".format(
                capability_path, registration_path, previous_id))
        self.set(capability_path, options)
        self.set(registration_path, registration_id)

    def unregister(
        self,
        registration_id: str,
        capability_path: str,
        registration_path: str
    ) -> Optional[Dict[str, Any]]:
        stored_id = self.get(registration_path)
        if not isinstance(stored_id, str):
            debug("stored registration ID at", registration_path, "is not a string")
            return None
        if stored_id != registration_id:
            debug("stored registration ID ({}) is not the same as the provided registration ID ({})".format(
                stored_id, registration_id))
            return None
        discarded = self.get(capability_path)
        self.remove(capability_path)
        self.remove(registration_path)
        return discarded

    def assign(self, d: Dict[str, Any]) -> None:
        # Normalize the legacy textDocumentSync shapes before merging.
        textsync = normalize_text_sync(d.pop("textDocumentSync", None))
        super().assign(d)
        if textsync:
            self.update(textsync)

    def should_notify_did_open(self) -> bool:
        return "textDocumentSync.didOpen" in self

    def text_sync_kind(self) -> int:
        kind = self.get("textDocumentSync.change.syncKind")
        return kind if isinstance(kind, int) else TextDocumentSyncKindNone

    def should_notify_did_change_workspace_folders(self) -> bool:
        return "workspace.workspaceFolders.changeNotifications" in self

    def should_notify_will_save(self) -> bool:
        return "textDocumentSync.willSave" in self

    def should_notify_did_save(self) -> Tuple[bool, bool]:
        """Return (should notify, should include the full text)."""
        save = self.get("textDocumentSync.save")
        if isinstance(save, dict):
            return True, bool(save.get("includeText"))
        if isinstance(save, bool):
            return save, False
        return False, False

    def should_notify_did_close(self) -> bool:
        return "textDocumentSync.didClose" in self
def _translate_path(path: str, source: str, destination: str) -> Tuple[str, bool]:
# TODO: Case-insensitive file systems. Maybe this problem needs a much larger refactor. Even Sublime Text doesn't
# handle case-insensitive file systems correctly. There are a few other places where case-sensitivity matters, for
# example when looking up the correct view for diagnostics, and when finding a view for goto-def.
if path.startswith(source) and len(path) > len(source) and path[len(source)] in ("/", "\\"):
return path.replace(source, destination, 1), True
return path, False
class PathMap:
    """A single local<->remote path prefix mapping."""

    __slots__ = ("_local", "_remote")

    def __init__(self, local: str, remote: str) -> None:
        self._local = local
        self._remote = remote

    @classmethod
    def parse(cls, json: Any) -> "Optional[List[PathMap]]":
        """Parse a list of {"local": ..., "remote": ...} objects; malformed
        entries are logged and skipped. Returns None for non-list input."""
        if not isinstance(json, list):
            return None
        parsed = []  # type: List[PathMap]
        for entry in json:
            if not isinstance(entry, dict):
                debug('path map entry is not an object')
                continue
            local = entry.get("local")
            if not isinstance(local, str):
                debug('missing "local" key for path map entry')
                continue
            remote = entry.get("remote")
            if not isinstance(remote, str):
                debug('missing "remote" key for path map entry')
                continue
            parsed.append(PathMap(local, remote))
        return parsed

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, PathMap) and self._local == other._local and self._remote == other._remote

    def map_from_local_to_remote(self, uri: str) -> Tuple[str, bool]:
        return _translate_path(uri, self._local, self._remote)

    def map_from_remote_to_local(self, uri: str) -> Tuple[str, bool]:
        return _translate_path(uri, self._remote, self._local)
class TransportConfig:
    """Describes how to reach a language server: a subprocess command and/or a
    TCP port, plus the environment and an optional pre-bound listener socket."""

    __slots__ = ("name", "command", "tcp_port", "env", "listener_socket")

    def __init__(
        self,
        name: str,
        command: List[str],
        tcp_port: Optional[int],
        env: Dict[str, str],
        listener_socket: Optional[socket.socket]
    ) -> None:
        if not (command or tcp_port):
            # Nothing to spawn and nothing to connect to -> no server.
            raise ValueError('neither "command" nor "tcp_port" is provided; cannot start a language server')
        self.name = name
        self.command = command
        self.tcp_port = tcp_port
        self.env = env
        self.listener_socket = listener_socket
class ClientConfig:
    """Resolved configuration for one language server client.

    Instances are built directly, from Sublime settings
    (``from_sublime_settings``), from a plain dict (``from_dict``), or by
    layering overrides on an existing config (``from_config``).

    Fix: the container parameters previously used mutable default arguments
    (``DottedDict()``, ``{}``) which are evaluated once at function-definition
    time and therefore shared across every ClientConfig constructed without
    explicit values. They now default to ``None`` and a fresh container is
    created per instance; the external interface is otherwise unchanged.
    """

    def __init__(self,
                 name: str,
                 selector: str,
                 priority_selector: Optional[str] = None,
                 schemes: Optional[List[str]] = None,
                 command: Optional[List[str]] = None,
                 binary_args: Optional[List[str]] = None,  # DEPRECATED
                 tcp_port: Optional[int] = None,
                 auto_complete_selector: Optional[str] = None,
                 enabled: bool = True,
                 init_options: Optional[DottedDict] = None,
                 settings: Optional[DottedDict] = None,
                 env: Optional[Dict[str, str]] = None,
                 experimental_capabilities: Optional[Dict[str, Any]] = None,
                 disabled_capabilities: Optional[DottedDict] = None,
                 file_watcher: Optional[FileWatcherConfig] = None,
                 path_maps: Optional[List[PathMap]] = None) -> None:
        self.name = name
        self.selector = selector
        # Fall back to the base selector when no priority selector is given.
        self.priority_selector = priority_selector if priority_selector else self.selector
        if isinstance(schemes, list):
            self.schemes = schemes  # type: List[str]
        else:
            self.schemes = ["file"]
        if isinstance(command, list):
            self.command = command
        else:
            # "binary_args" is the deprecated spelling of "command".
            assert isinstance(binary_args, list)
            self.command = binary_args
        self.tcp_port = tcp_port
        self.auto_complete_selector = auto_complete_selector
        self.enabled = enabled
        # Create fresh containers when omitted so defaults are never shared
        # between instances (mutable-default-argument pitfall).
        self.init_options = init_options if init_options is not None else DottedDict()
        self.settings = settings if settings is not None else DottedDict()
        self.env = env if env is not None else {}
        self.experimental_capabilities = experimental_capabilities
        self.disabled_capabilities = disabled_capabilities if disabled_capabilities is not None else DottedDict()
        self.file_watcher = file_watcher if file_watcher is not None else {}
        self.path_maps = path_maps
        self.status_key = "lsp_{}".format(self.name)

    @classmethod
    def from_sublime_settings(cls, name: str, s: sublime.Settings, file: str) -> "ClientConfig":
        """Build a config from a settings object layered over a base resource file."""
        base = sublime.decode_value(sublime.load_resource(file))
        settings = DottedDict(base.get("settings", {}))  # defined by the plugin author
        settings.update(read_dict_setting(s, "settings", {}))  # overrides from the user
        init_options = DottedDict(base.get("initializationOptions", {}))
        init_options.update(read_dict_setting(s, "initializationOptions", {}))
        disabled_capabilities = s.get("disabled_capabilities")
        file_watcher = cast(FileWatcherConfig, read_dict_setting(s, "file_watcher", {}))
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = DottedDict()
        return ClientConfig(
            name=name,
            selector=_read_selector(s),
            priority_selector=_read_priority_selector(s),
            schemes=s.get("schemes"),
            command=read_list_setting(s, "command", []),
            tcp_port=s.get("tcp_port"),
            auto_complete_selector=s.get("auto_complete_selector"),
            # Default to True, because an LSP plugin is enabled iff it is enabled as a Sublime package.
            enabled=bool(s.get("enabled", True)),
            init_options=init_options,
            settings=settings,
            env=read_dict_setting(s, "env", {}),
            experimental_capabilities=s.get("experimental_capabilities"),
            disabled_capabilities=disabled_capabilities,
            file_watcher=file_watcher,
            path_maps=PathMap.parse(s.get("path_maps"))
        )

    @classmethod
    def from_dict(cls, name: str, d: Dict[str, Any]) -> "ClientConfig":
        """Build a config from a plain dictionary (e.g. project settings)."""
        disabled_capabilities = d.get("disabled_capabilities")
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = DottedDict()
        schemes = d.get("schemes")
        if not isinstance(schemes, list):
            schemes = ["file"]
        return ClientConfig(
            name=name,
            selector=_read_selector(d),
            priority_selector=_read_priority_selector(d),
            schemes=schemes,
            command=d.get("command", []),
            tcp_port=d.get("tcp_port"),
            auto_complete_selector=d.get("auto_complete_selector"),
            enabled=d.get("enabled", False),
            init_options=DottedDict(d.get("initializationOptions")),
            settings=DottedDict(d.get("settings")),
            env=d.get("env", dict()),
            experimental_capabilities=d.get("experimental_capabilities"),
            disabled_capabilities=disabled_capabilities,
            file_watcher=d.get("file_watcher", dict()),
            path_maps=PathMap.parse(d.get("path_maps"))
        )

    @classmethod
    def from_config(cls, src_config: "ClientConfig", override: Dict[str, Any]) -> "ClientConfig":
        """Build a new config by layering ``override`` values over ``src_config``."""
        path_map_override = PathMap.parse(override.get("path_maps"))
        disabled_capabilities = override.get("disabled_capabilities")
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = src_config.disabled_capabilities
        return ClientConfig(
            name=src_config.name,
            selector=_read_selector(override) or src_config.selector,
            priority_selector=_read_priority_selector(override) or src_config.priority_selector,
            schemes=override.get("schemes", src_config.schemes),
            command=override.get("command", src_config.command),
            tcp_port=override.get("tcp_port", src_config.tcp_port),
            auto_complete_selector=override.get("auto_complete_selector", src_config.auto_complete_selector),
            enabled=override.get("enabled", src_config.enabled),
            init_options=DottedDict.from_base_and_override(
                src_config.init_options, override.get("initializationOptions")),
            settings=DottedDict.from_base_and_override(src_config.settings, override.get("settings")),
            env=override.get("env", src_config.env),
            experimental_capabilities=override.get(
                "experimental_capabilities", src_config.experimental_capabilities),
            disabled_capabilities=disabled_capabilities,
            file_watcher=override.get("file_watcher", src_config.file_watcher),
            path_maps=path_map_override if path_map_override else src_config.path_maps
        )

    def resolve_transport_config(self, variables: Dict[str, str]) -> TransportConfig:
        """Expand variables in the command/env and resolve the TCP setup.

        tcp_port semantics: ``0`` picks any free port to connect to; a
        negative value means *we* host a TCP server (``-1`` = any free port,
        ``< -1`` = host on the absolute value of the port).
        """
        tcp_port = None  # type: Optional[int]
        listener_socket = None  # type: Optional[socket.socket]
        if self.tcp_port is not None:
            # < 0 means we're hosting a TCP server
            if self.tcp_port < 0:
                # -1 means pick any free port
                if self.tcp_port < -1:
                    tcp_port = -self.tcp_port
                # Create a listener socket for incoming connections
                listener_socket = _start_tcp_listener(tcp_port)
                tcp_port = int(listener_socket.getsockname()[1])
            else:
                tcp_port = _find_free_port() if self.tcp_port == 0 else self.tcp_port
        if tcp_port is not None:
            variables["port"] = str(tcp_port)
        command = sublime.expand_variables(self.command, variables)
        command = [os.path.expanduser(arg) for arg in command]
        if tcp_port is not None:
            # DEPRECATED -- replace {port} with $port or ${port} in your client config
            command = [a.replace('{port}', str(tcp_port)) for a in command]
        env = os.environ.copy()
        for key, value in self.env.items():
            if key == 'PATH':
                # Prepend the configured value instead of replacing PATH.
                env[key] = sublime.expand_variables(value, variables) + os.path.pathsep + env[key]
            else:
                env[key] = sublime.expand_variables(value, variables)
        return TransportConfig(self.name, command, tcp_port, env, listener_socket)

    def set_view_status(self, view: sublime.View, message: str) -> None:
        """Show this config's status message in the view's status bar (if enabled)."""
        if sublime.load_settings("LSP.sublime-settings").get("show_view_status"):
            status = "{}: {}".format(self.name, message) if message else self.name
            view.set_status(self.status_key, status)

    def erase_view_status(self, view: sublime.View) -> None:
        """Remove this config's status message from the view's status bar."""
        view.erase_status(self.status_key)

    def match_view(self, view: sublime.View, scheme: str) -> bool:
        """Whether this config applies to the given view and URI scheme."""
        syntax = view.syntax()
        if not syntax:
            return False
        # Every part of a x.y.z scope seems to contribute 8.
        # An empty selector result in a score of 1.
        # A non-matching non-empty selector results in a score of 0.
        # We want to match at least one part of an x.y.z, and we don't want to match on empty selectors.
        return scheme in self.schemes and sublime.score_selector(syntax.scope, self.selector) >= 8

    def map_client_path_to_server_uri(self, path: str) -> str:
        """Translate a local path through the path maps and return it as a URI."""
        if self.path_maps:
            for path_map in self.path_maps:
                path, mapped = path_map.map_from_local_to_remote(path)
                if mapped:
                    break
        return filename_to_uri(path)

    def map_server_uri_to_client_path(self, uri: str) -> str:
        """Translate a server URI back into a local filesystem path."""
        path = uri_to_filename(uri)
        if self.path_maps:
            for path_map in self.path_maps:
                path, mapped = path_map.map_from_remote_to_local(path)
                if mapped:
                    break
        return path

    def is_disabled_capability(self, capability_path: str) -> bool:
        """Whether the dotted capability path is disabled by the user config."""
        for value in self.disabled_capabilities.walk(capability_path):
            if isinstance(value, bool):
                return value
            elif isinstance(value, dict):
                if value:
                    # If it's not empty we'll continue the walk
                    continue
                else:
                    # This might be a leaf node
                    return True
        return False

    def filter_out_disabled_capabilities(self, capability_path: str, options: Dict[str, Any]) -> Dict[str, Any]:
        """Return ``options`` minus the keys disabled under ``capability_path``."""
        result = {}  # type: Dict[str, Any]
        for k, v in options.items():
            if not self.is_disabled_capability("{}.{}".format(capability_path, k)):
                result[k] = v
        return result

    def __repr__(self) -> str:
        items = []  # type: List[str]
        for k, v in self.__dict__.items():
            if not k.startswith("_"):
                items.append("{}={}".format(k, repr(v)))
        return "{}({})".format(self.__class__.__name__, ", ".join(items))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, ClientConfig):
            return False
        for k, v in self.__dict__.items():
            if not k.startswith("_") and v != getattr(other, k):
                return False
        return True
def syntax2scope(syntax_path: str) -> Optional[str]:
    """Map a syntax file path to its base scope, or None when unresolvable."""
    syntax = sublime.syntax_from_path(syntax_path)
    if not syntax:
        return None
    return syntax.scope
def view2scope(view: sublime.View) -> str:
    """Return the base scope at position 0 of the view, or '' when absent."""
    scopes = view.scope_name(0).split()
    return scopes[0] if scopes else ''
def _read_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
    """Derive the document scope selector from a config, supporting legacy keys.

    Precedence: "selector" > "languages" (per entry: document_selector >
    syntaxes > languageId) > "document_selector" > "syntaxes" > "languageId".
    Returns an empty string when nothing usable is found.
    """
    # Best case scenario: an explicit "selector" value.
    selector = config.get("selector")
    if isinstance(selector, str):
        return selector
    # Otherwise, look for the legacy "languages": [...]
    languages = config.get("languages")
    if isinstance(languages, list):
        selectors = []
        for language in languages:
            # First priority is document_selector,
            document_selector = language.get("document_selector")
            if isinstance(document_selector, str):
                selectors.append(document_selector)
                continue
            # After that syntaxes has priority,
            syntaxes = language.get("syntaxes")
            if isinstance(syntaxes, list):
                for path in syntaxes:
                    syntax = sublime.syntax_from_path(path)
                    if syntax:
                        selectors.append(syntax.scope)
                continue
            # No syntaxes and no document_selector... then there must exist a languageId.
            language_id = language.get("languageId")
            if isinstance(language_id, str):
                selectors.append("source.{}".format(language_id))
        # Parenthesize each piece so the "|" union binds each selector whole.
        return "|".join(map("({})".format, selectors))
    # Otherwise, look for "document_selector"
    document_selector = config.get("document_selector")
    if isinstance(document_selector, str):
        return document_selector
    # Otherwise, look for "syntaxes": [...]
    syntaxes = config.get("syntaxes")
    if isinstance(syntaxes, list):
        selectors = []
        for path in syntaxes:
            syntax = sublime.syntax_from_path(path)
            if syntax:
                selectors.append(syntax.scope)
        return "|".join(selectors)
    # No syntaxes and no document_selector... then there must exist a languageId.
    language_id = config.get("languageId")
    if language_id:
        return "source.{}".format(language_id)
    return ""
def _read_priority_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
    """Derive the priority (feature) selector from a config, supporting legacy keys.

    Precedence: "priority_selector" > "languages" (per entry: feature_selector >
    scopes > languageId) > "feature_selector" > "scopes" > "languageId".
    """
    explicit = config.get("priority_selector")
    if isinstance(explicit, str):
        return explicit
    # Legacy "languages": [...] form.
    languages = config.get("languages")
    if isinstance(languages, list):
        parts = []  # type: List[str]
        for language in languages:
            feature_selector = language.get("feature_selector")
            if isinstance(feature_selector, str):
                parts.append(feature_selector)
            else:
                scopes = language.get("scopes")
                if isinstance(scopes, list):
                    parts.extend(scopes)
                else:
                    language_id = language.get("languageId")
                    if isinstance(language_id, str):
                        parts.append("source.{}".format(language_id))
        return "|".join("({})".format(part) for part in parts)
    # Legacy flat "feature_selector".
    feature_selector = config.get("feature_selector")
    if isinstance(feature_selector, str):
        return feature_selector
    # Legacy flat "scopes": [...].
    scopes = config.get("scopes")
    if isinstance(scopes, list):
        return "|".join("({})".format(scope) for scope in scopes)
    # Last resort: a languageId.
    language_id = config.get("languageId")
    if language_id:
        return "source.{}".format(language_id)
    return ""
def _find_free_port() -> int:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _start_tcp_listener(tcp_port: Optional[int]) -> socket.socket:
    """Open a listening socket on localhost.

    A falsy ``tcp_port`` lets the OS choose a free port; the caller reads it
    back via ``getsockname()``. Accepts a single pending connection and times
    out after TCP_CONNECT_TIMEOUT seconds.
    """
    listener = socket.socket()
    listener.bind(('localhost', tcp_port or 0))
    listener.settimeout(TCP_CONNECT_TIMEOUT)
    listener.listen(1)
    return listener
| 41.697517 | 135 | 0.641755 |
795497a6a251b8f65f4e554d1cc445c62ef199d8 | 11,494 | py | Python | pyspedas/erg/satellite/erg/mgf/mgf.py | pulupa/pyspedas | 7228199cf16eca2a27d130f1e4985ef1e69462ea | [
"MIT"
] | null | null | null | pyspedas/erg/satellite/erg/mgf/mgf.py | pulupa/pyspedas | 7228199cf16eca2a27d130f1e4985ef1e69462ea | [
"MIT"
] | null | null | null | pyspedas/erg/satellite/erg/mgf/mgf.py | pulupa/pyspedas | 7228199cf16eca2a27d130f1e4985ef1e69462ea | [
"MIT"
] | null | null | null | import cdflib
import numpy as np
from pytplot import clip, get_data, options, ylim
from ..load import load
def mgf(trange=['2017-03-27', '2017-03-28'],
        datatype='8sec',
        level='l2',
        suffix='',
        get_support_data=False,
        varformat=None,
        varnames=[],
        downloadonly=False,
        notplot=False,
        no_update=False,
        uname=None,
        passwd=None,
        time_clip=False,
        ror=True,
        coord='dsi',
        version=None):
    """
    This function loads data from the MGF experiment from the Arase mission

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

        datatype: str
            Data type; Valid options: '8sec', '64hz', '128hz', '256hz'
            (aliases '8s', '8', '64', '128', '256' are accepted)

        level: str
            Data level; Valid options:

        suffix: str
            The tplot variable names will be given this suffix. By default,
            no suffix is added.

        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot. By default, only loads in data with a
            "VAR_TYPE" attribute of "data".

        varformat: str
            The file variable formats to load into tplot. Wildcard character
            "*" is accepted. By default, all variables are loaded in.

        downloadonly: bool
            Set this flag to download the CDF files, but not load them into
            tplot variables

        notplot: bool
            Return the data in hash tables instead of creating tplot variables

        no_update: bool
            If set, only load data from your local cache

        time_clip: bool
            Time clip the variables to exactly the range specified in the trange keyword

        ror: bool
            If set, print PI info and rules of the road

        coord: str
            "sm", "dsi", "gse", "gsm", "sgi"

        version: str
            Set this value to specify the version of cdf files (such as "v03.03", "v03.04", ...)

    Returns:
        List of tplot variables created.

    """
    initial_notplot_flag = bool(notplot)

    # Normalize shorthand datatype aliases to their canonical names.
    aliases = {'8s': '8sec', '8': '8sec',
               '64': '64hz', '128': '128hz', '256': '256hz'}
    datatype = aliases.get(datatype, datatype)

    prefix = 'erg_mgf_'+level+'_'
    if datatype == '8sec':
        # Daily files for the 8-second product.
        file_res = 3600. * 24
        pathformat = 'satellite/erg/mgf/'+level+'/'+datatype + \
            '/%Y/%m/erg_mgf_'+level+'_'+datatype+'_%Y%m%d_'
    else:
        # Hourly, coordinate-specific files for the high-rate products.
        file_res = 3600.
        pathformat = 'satellite/erg/mgf/'+level+'/'+datatype + \
            '/%Y/%m/erg_mgf_'+level+'_'+datatype+'_' + coord + '_%Y%m%d%H_'

    if version is None:
        pathformat += 'v??.??.cdf'
    else:
        pathformat += version + '.cdf'

    loaded_data = load(pathformat=pathformat, file_res=file_res, trange=trange, level=level, datatype=datatype, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
                       varformat=varformat, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd)

    if (loaded_data is None) or (loaded_data == []):
        return loaded_data

    if (len(loaded_data) > 0) and ror:
        try:
            if isinstance(loaded_data, list):
                if downloadonly:
                    cdf_file = cdflib.CDF(loaded_data[-1])
                    gatt = cdf_file.globalattsget()
                else:
                    gatt = get_data(loaded_data[-1], metadata=True)['CDF']['GATT']
            elif isinstance(loaded_data, dict):
                gatt = loaded_data[list(loaded_data.keys())[-1]]['CDF']['GATT']
            # --- print PI info and rules of the road
            print(' ')
            print(
                '**************************************************************************')
            print(gatt["LOGICAL_SOURCE_DESCRIPTION"])
            print('')
            print('Information about ERG MGF')
            print('')
            print('PI: ', gatt['PI_NAME'])
            print("Affiliation: "+gatt["PI_AFFILIATION"])
            print('')
            print('RoR of ERG project common: https://ergsc.isee.nagoya-u.ac.jp/data_info/rules_of_the_road.shtml.en')
            print(
                'RoR of MGF L2: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Mgf')
            print('Contact: erg_mgf_info at isee.nagoya-u.ac.jp')
            print(
                '**************************************************************************')
        except Exception:
            # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit propagate.
            print('printing PI info and rules of the road was failed')

    if initial_notplot_flag or downloadonly:
        return loaded_data

    def _set_yrange(name):
        # Set the y-axis range of a tplot variable to its data min/max.
        _, bdata = get_data(name)
        ylim(name, np.nanmin(bdata), np.nanmax(bdata))

    if datatype == '8sec':
        coords = ('dsi', 'gse', 'gsm', 'sm')

        # remove the fill value (-1.0e+30) by clipping to plausible ranges
        for c in coords:
            clip(prefix + 'mag_'+datatype+'_'+c+suffix, -1e+6, 1e6)
            clip(prefix + 'rmsd_'+datatype+'_'+c+suffix, -1e+6, +1e+6)
            clip(prefix + 'igrf_'+datatype+'_'+c+suffix, -1e+6, +1e+6)
        clip(prefix + 'magt_'+datatype+suffix, -1e+6, 1e6)
        clip(prefix + 'rmsd_'+datatype+suffix, 0., 80.)
        clip(prefix + 'dyn_rng_'+datatype+suffix, -120., +1e+6)

        # set yrange (the original set the base rmsd range twice; once suffices)
        for c in coords:
            _set_yrange(prefix + 'mag_'+datatype+'_'+c+suffix)
            _set_yrange(prefix + 'rmsd_'+datatype+'_'+c+suffix)
        _set_yrange(prefix + 'magt_'+datatype+suffix)
        _set_yrange(prefix + 'rmsd_'+datatype+suffix)
        _set_yrange(prefix + 'quality_'+datatype+suffix)
        _set_yrange(prefix + 'quality_'+datatype+'_gc'+suffix)

        # set component labels and line colors for the vector variables
        for c in coords:
            for kind in ('mag_', 'rmsd_', 'igrf_'):
                name = prefix + kind + datatype + '_' + c + suffix
                options(name, 'legend_names', ['Bx', 'By', 'Bz'])
                options(name, 'Color', ['b', 'g', 'r'])
        options(prefix + 'quality_'+datatype+suffix, 'Color', ['r', 'g', 'b'])
    else:
        name = prefix + 'mag_'+datatype+'_' + coord + suffix
        # remove the fill value (-1.0e+30)
        clip(name, -1e+6, 1e6)
        _set_yrange(name)
        options(name, 'legend_names', ['Bx', 'By', 'Bz'])
        options(name, 'Color', ['b', 'g', 'r'])

    return loaded_data
| 39.771626 | 176 | 0.538368 |
795498700d7e016d2e8657192e413f9f224d4289 | 397 | py | Python | todo/tasks/urls.py | santhosh2000/Hackathon_ToDoList | c828985dc2e322ba43928a8ae35bbf8aa13198ef | [
"CC-BY-3.0"
] | 1 | 2020-07-11T03:56:55.000Z | 2020-07-11T03:56:55.000Z | todo/tasks/urls.py | santhosh2000/Hackathon_ToDoList | c828985dc2e322ba43928a8ae35bbf8aa13198ef | [
"CC-BY-3.0"
] | null | null | null | todo/tasks/urls.py | santhosh2000/Hackathon_ToDoList | c828985dc2e322ba43928a8ae35bbf8aa13198ef | [
"CC-BY-3.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('postlogin', views.postLogin, name="login-output"),
path('signup', views.signup, name="signup"),
path('postsignup', views.postSignup, name="signup"),
path('', views.welcome, name="index"),
path('update_task/<str:pk>/', views.updateTask, name="update_task"),
path('delete/<str:pk>/', views.deleteTask, name="delete"),
] | 36.090909 | 69 | 0.697733 |
795498743eaa8e235edb91a39f6502f65b746024 | 2,454 | py | Python | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/SwitchDBInstanceVpcRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/SwitchDBInstanceVpcRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/SwitchDBInstanceVpcRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SwitchDBInstanceVpcRequest(RpcRequest):
	"""RPC request object for the RDS ``SwitchDBInstanceVpc`` API (2014-08-15).

	Each query parameter is exposed as a getter/setter pair that proxies
	``get_query_params``/``add_query_param`` on the ``RpcRequest`` base class.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Rds', '2014-08-15', 'SwitchDBInstanceVpc','rds')

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_DBInstanceId(self):
		return self.get_query_params().get('DBInstanceId')

	def set_DBInstanceId(self,DBInstanceId):
		self.add_query_param('DBInstanceId',DBInstanceId)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	def get_VSwitchId(self):
		return self.get_query_params().get('VSwitchId')

	def set_VSwitchId(self,VSwitchId):
		self.add_query_param('VSwitchId',VSwitchId)

	def get_PrivateIpAddress(self):
		return self.get_query_params().get('PrivateIpAddress')

	def set_PrivateIpAddress(self,PrivateIpAddress):
		self.add_query_param('PrivateIpAddress',PrivateIpAddress)

	def get_VPCId(self):
		return self.get_query_params().get('VPCId')

	def set_VPCId(self,VPCId):
		self.add_query_param('VPCId',VPCId)
79549895b6162339c23c074a4dcc93d43ade9d32 | 15,823 | py | Python | test/test.py | dynaptico/pamfaxp | 664875776767fefacd2ebb3cf48f8d3b6a9afae6 | [
"MIT"
] | 1 | 2016-03-18T13:19:52.000Z | 2016-03-18T13:19:52.000Z | test/test.py | dynaptico/pamfaxp | 664875776767fefacd2ebb3cf48f8d3b6a9afae6 | [
"MIT"
] | 3 | 2015-02-11T20:52:01.000Z | 2020-10-11T15:26:04.000Z | test/test.py | dynaptico/pamfaxp | 664875776767fefacd2ebb3cf48f8d3b6a9afae6 | [
"MIT"
] | 2 | 2015-02-12T09:17:11.000Z | 2020-03-26T14:56:16.000Z | #!/usr/bin/env python
from pamfax import PamFax
import logging
import socket
import sys
import unittest
import time
sys.path = ['..:'] + sys.path
# Test configuration: the sandbox endpoint and an IP used by the geo-IP test.
IP_ADDR = socket.gethostbyname('www.dynaptico.com')
HOST = 'sandbox-api.pamfax.biz'
# NOTE(review): placeholder credentials — replace with real sandbox values
# before running this test module.
USERNAME = 'username'
PASSWORD = 'password'
APIKEY = 'apikey'
APISECRET = 'apisecret'
DROPBOX_USERNAME = 'username'
DROPBOX_PASSWORD = 'password'
"""
Make sure to upload a file through
https://sandbox-api.pamfax.biz/server/faxin/faxintest.php
before running this test.
"""
logger = logging.getLogger('pamfax')
def _assert_json(message, response):
    """Log the API call description and response, then assert it succeeded."""
    logger.debug(message)
    logger.debug(response)
    assert response['result']['code'] == 'success'
    logger.debug('*'*10)
def _assert_file(message, f, content_type):
    """Assert that a file download returned both content and a content type."""
    logger.debug(message)
    logger.debug(content_type)
    assert f is not None
    assert content_type is not None
# Seed our test account with credit and a fax
# (runs at import time; performs live network calls against the sandbox).
pamfax = PamFax(USERNAME, PASSWORD, host=HOST, apikey=APIKEY, apisecret=APISECRET)
message = 'Adding credit to sandbox user'
response = pamfax.add_credit_to_sandbox_user(1000, "Testing purposes")
_assert_json(message, response)
message = 'Listing inbox faxes'
response = pamfax.list_inbox_faxes()
_assert_json(message, response)
# Grab the first inbox fax; its uuids are shared by the tests below.
files = response['InboxFaxes']['content']
f = files[0]
file_uuid = f['file_uuid']
uuid = f['uuid']
class TestPamFax(unittest.TestCase):
"""A set of unit tests for this implementation of the PamFax API."""
    def test_Common(self):
        """Smoke-tests the PamFax 'Common' API group end-to-end against the sandbox."""
        message = 'Getting current settings'
        response = pamfax.get_current_settings()
        _assert_json(message, response)
        message = 'Getting file'
        f, content_type = pamfax.get_file(file_uuid)
        _assert_file(message, f, content_type)
        message = 'Getting geo IP information'
        response = pamfax.get_geo_ip_information(IP_ADDR)
        _assert_json(message, response)
        message = 'Getting page preview'
        f, content_type = pamfax.get_page_preview(uuid, 1)
        _assert_file(message, f, content_type)
        message = 'Listing countries'
        response = pamfax.list_countries()
        _assert_json(message, response)
        message = 'Listing countries for zone'
        response = pamfax.list_countries_for_zone(1)
        _assert_json(message, response)
        message = 'Listing currencies 1'
        response = pamfax.list_currencies()
        _assert_json(message, response)
        message = 'Listing currencies 2'
        response = pamfax.list_currencies('JPY')
        _assert_json(message, response)
        message = 'Listing languages 1'
        response = pamfax.list_languages()
        _assert_json(message, response)
        message = 'Listing languages 2'
        response = pamfax.list_languages(50)
        _assert_json(message, response)
        message = 'Listing strings'
        response = pamfax.list_strings(['hello'])
        _assert_json(message, response)
        message = 'Listing supported file types'
        response = pamfax.list_supported_file_types()
        _assert_json(message, response)
        message = 'Listing timezones'
        response = pamfax.list_timezones()
        _assert_json(message, response)
        message = 'Listing versions'
        response = pamfax.list_versions()
        _assert_json(message, response)
        message = 'Listing zones'
        response = pamfax.list_zones()
        _assert_json(message, response)
    def test_FaxHistory(self):
        """Smoke-tests the PamFax 'FaxHistory' API group using the seeded inbox fax."""
        message = 'Adding note to fax'
        response = pamfax.add_fax_note(uuid, 'This is my favorite fax')
        _assert_json(message, response)
        message = 'Counting faxes'
        response = pamfax.count_faxes('inbox')
        _assert_json(message, response)
        message = 'Deleting faxes'
        response = pamfax.delete_faxes([uuid])
        _assert_json(message, response)
        message = 'Restoring fax'
        response = pamfax.restore_fax(uuid)
        _assert_json(message, response)
        #message = 'Deleting faxes from trash'
        #response = pamfax.delete_faxes_from_trash([uuid])
        #_assert_json(message, response)
        message = 'Emptying trash'
        response = pamfax.empty_trash()
        _assert_json(message, response)
        message = 'Getting fax details'
        response = pamfax.get_fax_details(uuid)
        _assert_json(message, response)
        #message = 'Getting fax group'
        #response = pamfax.get_fax_group(uuid)
        #_assert_json(message, response)
        message = 'Getting inbox fax'
        response = pamfax.get_inbox_fax(uuid, mark_read=False)
        _assert_json(message, response)
        #message = 'Listing fax group'
        #response = pamfax.list_fax_group(uuid)
        #_assert_json(message, response)
        message = 'Listing fax notes'
        response = pamfax.list_fax_notes(uuid)
        _assert_json(message, response)
        message = 'Listing inbox faxes'
        response = pamfax.list_inbox_faxes()
        _assert_json(message, response)
        message = 'Listing outbox faxes'
        response = pamfax.list_outbox_faxes()
        _assert_json(message, response)
        message = 'Listing recent faxes'
        response = pamfax.list_recent_faxes()
        _assert_json(message, response)
        message = 'Listing sent faxes'
        response = pamfax.list_sent_faxes()
        _assert_json(message, response)
        # The transmission report needs a *sent* fax uuid, not the inbox one.
        out_uuid = response['SentFaxes']['content'][0]['uuid']
        message = 'Getting transmission report'
        f, content_type = pamfax.get_transmission_report(out_uuid)
        _assert_file(message, f, content_type)
        #message = 'Listing trash'
        #response = pamfax.list_trash()
        #_assert_json(message, response)
        message = 'Listing unpaid faxes'
        response = pamfax.list_unpaid_faxes()
        _assert_json(message, response)
        message = 'Setting fax as read'
        response = pamfax.set_fax_read(uuid)
        _assert_json(message, response)
        message = 'Setting faxes as read'
        response = pamfax.set_faxes_as_read([uuid])
        _assert_json(message, response)
        message = 'Setting spam state for faxes'
        response = pamfax.set_spam_state_for_faxes([uuid], is_spam=False)
        _assert_json(message, response)
def test_FaxJob(self):
    """End-to-end exercise of the fax-job lifecycle: build, populate, poll, send."""
    def checked(message, response):
        # Assert the API response is well-formed, then hand it back to the caller.
        _assert_json(message, response)
        return response

    checked('Creating a fax job', pamfax.create())
    covers = checked('Listing available covers', pamfax.list_available_covers())
    checked('Adding a cover',
            pamfax.set_cover(covers['Covers']['content'][1]['id'],
                             'Dynaptico: Tomorrow On Demand'))
    checked('Adding a remote file',
            pamfax.add_remote_file('https://s3.amazonaws.com/dynaptico/Dynaptico.pdf'))
    local_file = checked('Adding a local file', pamfax.add_file('Dynaptico.pdf'))
    checked('Removing a file',
            pamfax.remove_file(local_file['FaxContainerFile']['file_uuid']))
    checked('Adding recipient 1', pamfax.add_recipient('+81345789554'))
    recipient = checked('Adding recipient 2', pamfax.add_recipient('+81362763902'))
    checked('Removing a recipient',
            pamfax.remove_recipient(recipient['FaxRecipient']['number']))
    checked('Listing recipients', pamfax.list_recipients())
    checked('Listing fax files', pamfax.list_fax_files())

    # Poll the server until the fax container reports it is ready to send.
    logger.debug('*'*10)
    logger.debug('Checking state')
    elapsed = 0
    while True:
        fax_state = pamfax.get_state()
        logger.debug(fax_state)
        if fax_state['FaxContainer']['state'] == 'ready_to_send':
            break
        time.sleep(2)
        elapsed += 2
        logger.debug("%d seconds elapsed...", elapsed)
    assert fax_state['FaxContainer']['state'] == 'ready_to_send'

    checked('Preview the fax', pamfax.get_preview())
    checked('Send the fax', pamfax.send())
    # Only works when the account lacks sufficient credit:
    #checked('Send the fax later', pamfax.send_later())
    # Only works after the fax has moved to the fax history:
    #checked('Cloning the fax', pamfax.clone_fax(faxjob['FaxContainer']['uuid']))
def test_NumberInfo(self):
    """Exercise the number-information queries for a known fax number."""
    def checked(message, response):
        # Validate each response as soon as it arrives.
        _assert_json(message, response)

    checked('Getting number info', pamfax.get_number_info('+81362763902'))
    checked('Getting page price', pamfax.get_page_price('+81362763902'))
def test_OnlineStorage(self):
    """Exercise the DropBox online-storage integration endpoints."""
    def checked(message, response):
        # Validate each response as soon as it arrives.
        _assert_json(message, response)

    checked('Dropping authentication', pamfax.drop_authentication('DropBoxStorage'))
    checked('Authenticating',
            pamfax.authenticate('DropBoxStorage', DROPBOX_USERNAME, DROPBOX_PASSWORD))
    logo, content_type = pamfax.get_provider_logo('DropBoxStorage', 16)
    _assert_file('Getting provider logo', logo, content_type)
    checked('Listing folder contents', pamfax.list_folder_contents('DropBoxStorage'))
    checked('Listing providers', pamfax.list_providers())
    # Requires a token obtained out-of-band:
    #checked('Setting auth token', pamfax.set_auth_token('DropBoxStorage', token))
def test_Session(self):
    """Exercise session-level endpoints without invalidating the shared session."""
    def checked(message, response):
        # Validate each response as soon as it arrives.
        _assert_json(message, response)

    checked('Creating login identifier',
            pamfax.create_login_identifier(timetolifeminutes=10))
    checked('Listing changes', pamfax.list_changes())
    # Logging out would break the session used by the other tests:
    #checked('Logging out', pamfax.logout())
    checked('Pinging', pamfax.ping())
    checked('Registering listener',
            pamfax.register_listener(['faxsending', 'faxfailed']))
    checked('Reloading user', pamfax.reload_user())
    # Re-verification is unnecessary while the session is live:
    #checked('Verifying user', pamfax.verify_user(USERNAME, PASSWORD))
def test_Shopping(self):
    """Exercise the shopping/billing endpoints."""
    def checked(message, response):
        # Validate each response as soon as it arrives.
        _assert_json(message, response)

    # Needs a real invoice uuid to work:
    #f, content_type = pamfax.get_invoice('')
    #_assert_file('Getting invoice', f, content_type)
    checked('Getting nearest fax in number', pamfax.get_nearest_fax_in_number(IP_ADDR))
    checked('Getting shop link', pamfax.get_shop_link('pro_plan'))
    checked('Listing available items', pamfax.list_available_items())
    checked('Listing fax in area codes', pamfax.list_fax_in_areacodes('JP'))
    checked('Listing fax in countries', pamfax.list_fax_in_countries())
    checked('Redeeming credit voucher', pamfax.redeem_credit_voucher('PCPC0815'))
def test_UserInfo(self):
    """Exercise the user-information endpoints; destructive calls stay disabled."""
    def checked(message, response):
        # Validate each response as soon as it arrives.
        _assert_json(message, response)

    # Account creation/deletion are intentionally left disabled:
    #checked('Creating user', pamfax.create_user('abc123xyz', 'abc123xyz', 'xyz123abc', 'abc123xyz@gmail.com', 'en-US'))
    #checked('Deleting user', pamfax.delete_user())
    checked('Getting culture info', pamfax.get_culture_info())
    #checked('Getting user\'s avatar', pamfax.get_users_avatar())
    checked('Has avatar?', pamfax.has_avatar())
    checked('Has plan?', pamfax.has_plan())
    checked('Listing expirations', pamfax.list_expirations())
    checked('Listing inboxes', pamfax.list_inboxes(expired_too=True, shared_too=True))
    checked('Listing orders', pamfax.list_orders())
    checked('Listing profiles', pamfax.list_profiles())
    checked('Listing user agents', pamfax.list_user_agents(max=2))
    checked('Listing wall messages', pamfax.list_wall_messages(count=1))
    checked('Saving user', pamfax.save_user())
    checked('Sending message',
            pamfax.send_message('Hello, world!', type='email',
                                recipient='test@example.com', subject='Hello'))
    checked('Sending password reset message',
            pamfax.send_password_reset_message(USERNAME))
    # Settings/password mutations are intentionally left disabled:
    #checked('Setting online storage settings', pamfax.set_online_storage_settings('DropBoxStorage', ['inbox_enabled=1', 'inbox_path=/']))
    #checked('Setting password', pamfax.set_password('temppw', hashFunction='plain'))
    #checked('Setting password', pamfax.set_password(PASSWORD, hashFunction='plain'))
    #checked('Setting profile properties', pamfax.set_profile_properties())
    checked('Validating new username', pamfax.validate_new_username('bogususername'))
# Run the full unittest suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 34.101293 | 116 | 0.623207 |
795498c480eae612ec239fb47ac2f0ddb7b795aa | 1,031 | py | Python | tests/journal.api/error_notes.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 25 | 2018-04-23T01:45:39.000Z | 2021-12-10T06:01:23.000Z | tests/journal.api/error_notes.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 53 | 2018-05-31T04:55:00.000Z | 2021-10-07T21:41:32.000Z | tests/journal.api/error_notes.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 12 | 2018-04-23T22:50:40.000Z | 2022-02-20T17:27:23.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2021 all rights reserved
def test():
    """Verify that channel metadata is readable and that edits to it persist."""
    # access
    import journal

    # make a channel
    channel = journal.error("test.channel")

    # Write through the notes mapping: set the application name and an author.
    metadata = channel.notes
    metadata["application"] = "error_notes"
    metadata["author"] = "michael"

    # Fetch the notes again before checking; this round trip is the point of
    # the test: when the C++ library provides the support, it proves that the
    # notes are mutable and the adjustments stick.
    metadata = channel.notes
    expected = {
        "application": "error_notes",
        "author": "michael",
        "channel": "test.channel",
        "severity": "error",
    }
    for key, value in expected.items():
        assert metadata[key] == value

    # all done
    return
# main entry point when executed as a script
if __name__ == "__main__":
    # run the test
    test()
# end of file
| 22.911111 | 91 | 0.638215 |
7954990e3fb145649796bca0a2c85d60c38c2c55 | 336 | py | Python | Q67_Add-Binary.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
] | null | null | null | Q67_Add-Binary.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
] | null | null | null | Q67_Add-Binary.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Return the sum of two binary strings as a binary string.

        Args:
            a: Non-empty string of '0'/'1' characters.
            b: Non-empty string of '0'/'1' characters.

        Returns:
            Binary representation of a + b without the '0b' prefix.
        """
        # int(s, 2) parses a base-2 string directly, replacing the previous
        # hand-rolled bit-by-bit conversion; bin() prefixes '0b', so strip it.
        return bin(int(a, 2) + int(b, 2))[2:]
79549985f2c88252367857e0491e846ae0241dee | 2,104 | py | Python | py/test/unit_tests.py | michaelJwilson/legacypipe | 47d005356cbd0c9fb864c960ee7bbf800e543cad | [
"BSD-3-Clause"
] | null | null | null | py/test/unit_tests.py | michaelJwilson/legacypipe | 47d005356cbd0c9fb864c960ee7bbf800e543cad | [
"BSD-3-Clause"
] | null | null | null | py/test/unit_tests.py | michaelJwilson/legacypipe | 47d005356cbd0c9fb864c960ee7bbf800e543cad | [
"BSD-3-Clause"
] | null | null | null | import unittest
class TestOneblob(unittest.TestCase):

    def test_modelsel(self):
        """Walk _select_model through a ladder of chi-squared improvements."""
        from legacypipe.oneblob import _select_model
        nparams = dict(ptsrc=2, rex=3, exp=5, dev=5, comp=9)
        galaxy_margin = 3.**2 + (nparams['exp'] - nparams['ptsrc'])
        rex = True  # unused; kept for parity with the original test setup

        # Each case pairs per-model chi-squared scores with the model that
        # _select_model is expected to pick given the margins above.
        cases = [
            (dict(ptsrc=0, rex=0), 'none'),
            (dict(ptsrc=500, rex=0), 'ptsrc'),
            (dict(ptsrc=0, rex=500), 'rex'),
            (dict(ptsrc=500, rex=501), 'ptsrc'),
            (dict(ptsrc=500, rex=503), 'ptsrc'),
            (dict(ptsrc=500, rex=510), 'rex'),
            (dict(ptsrc=500, rex=504, exp=505), 'ptsrc'),
            (dict(ptsrc=500, rex=505, exp=505), 'rex'),
            (dict(ptsrc=500, rex=505, exp=520), 'exp'),
            (dict(ptsrc=5000, rex=5005, exp=5020), 'ptsrc'),
            (dict(ptsrc=5000, rex=5005, exp=5051), 'exp'),
            (dict(ptsrc=5000, rex=5005, exp=5051, dev=5052), 'dev'),
        ]
        for chisqs, expected in cases:
            mod = _select_model(chisqs, nparams, galaxy_margin)
            self.assertTrue(mod == expected)
unittest.main()
| 33.396825 | 67 | 0.618346 |
795499af87420eb986337891fe5e5a27355903be | 6,053 | py | Python | test/test-comm-activities.py | althoenm/CILs-first-bot | 7c38c59f21c36a883c12f678555306bafd64b820 | [
"Apache-2.0"
] | null | null | null | test/test-comm-activities.py | althoenm/CILs-first-bot | 7c38c59f21c36a883c12f678555306bafd64b820 | [
"Apache-2.0"
] | null | null | null | test/test-comm-activities.py | althoenm/CILs-first-bot | 7c38c59f21c36a883c12f678555306bafd64b820 | [
"Apache-2.0"
] | null | null | null | # %%
import datetime
import time
from secrets import username, password
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from openpyxl import Workbook, load_workbook
# %%
# access sheet names
wb = load_workbook('week_12-3.xlsx')
#print(wb.sheetnames)
# %%
# Load community activity note sheet
sh = wb['community_activities']
row_count = sh.max_row
#print(row_count)
# %%
# Flatten every worksheet row into a plain list of cell values.
sheet_cells = [[cell.value for cell in row] for row in sh.iter_rows()]
# %%
# Keep only the data rows: index 0 is the header row.
datas = sheet_cells[1:]
print(datas)
for i in range(len(datas)):
    print(i)
# %%
# Open Chrome
driver = webdriver.Chrome('/Users/matte/Downloads/chromedriver')
# Open the website
driver.get("https://www.cilsfirst.com/")
# Wait for the page to load
time.sleep(2)
# %%
# Credentials come from the local secrets module (imported at the top of the
# file); interactive input/getpass prompting was abandoned in favor of this.
user = username
p = password
# %%
# Find the username and password fields and enter the credentials.
try:
    driver.find_element_by_xpath('/html/body/form/center/div/table/tbody/tr[1]/td[2]/input').send_keys(user)
    driver.find_element_by_xpath('/html/body/form/center/div/table/tbody/tr[2]/td[2]/input').send_keys(p)
except Exception:
    # A bare `except:` previously swallowed every failure (including
    # KeyboardInterrupt) and let the script continue to the login click with
    # empty fields. Report, shut the browser down, and fail loudly instead.
    print("Could not find the username and password fields")
    driver.quit()
    raise
# %%
# Find the login button and click it
driver.find_element_by_xpath('/html/body/form/center/div/table/tbody/tr[3]/td/input').click()
time.sleep(2)
# %%
# %%
# def fix_date(date):
# d = date
# d = str(d.strftime('%m/%d/%Y'))
# return d
# %%
# Enter one community-activity record per spreadsheet row. The original code
# duplicated the entire form-filling sequence in both branches of `if i == 0`;
# the branches differ only in which "add" button opens the blank form, so the
# shared sequence is expressed once as a field table below.
for i in range(len(datas)):
    row = datas[i]
    # Normalize the row values up front so bad data fails before any clicks.
    # Each entry: (form-field xpath, value to type, pause after typing).
    fields = [
        ('//*[@id="in_244clone"]', str(row[0]), 1.5),   # date (longer wait for the date widget)
        ('//*[@id="in_530"]', str(row[2]), .25),        # time begun HH:MM
        ('//*[@id="in_531"]', str(row[3]), .25),        # time ended HH:MM
        ('//*[@id="in_250"]', str(row[1]), .25),        # hours (float)
        ('//*[@id="in_247"]', row[4].strip(), .25),     # issue area dropdown
        ('//*[@id="in_258"]', row[5].strip(), .25),     # projects dropdown
        ('//*[@id="in_248"]', row[6].strip(), .25),     # service program dropdown
        ('//*[@id="in_338"]', row[7], 0),               # priority area dropdown (no pause)
        ('//*[@id="in_303"]', row[8], .25),             # funding source dropdown
        ('//*[@id="in_255"]', row[9], .25),             # staff comments text box
    ]
    if i == 0:
        # First record: open the Community Activities section, then click
        # 'add new community activities record'.
        driver.find_element_by_xpath('//*[@id="side_4"]/a').click()
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="content_div"]/div[2]/table/tbody/tr[4]/td/a').click()
    else:
        # Subsequent records: click 'add a new record' on the
        # 'record successfully saved' confirmation page.
        driver.find_element_by_xpath('//*[@id="data_form"]/table[1]/tbody/tr[4]/td/input[3]').click()
        time.sleep(0.25)
    # Fill every field in order, pausing between entries so the page's
    # scripts can keep up with the typed input.
    for xpath, value, pause in fields:
        driver.find_element_by_xpath(xpath).send_keys(value)
        if pause:
            time.sleep(pause)
    # Click save record
    driver.find_element_by_xpath('//*[@id="sub"]').click()
795499d08c546484a1cfdbcf0654fd1cd0ec4e51 | 21,125 | py | Python | captum/attr/_utils/attribution.py | i-jones/captum | 567ec6fc67ab85ce07d075b25428be22bb65e31b | [
"BSD-3-Clause"
] | 3,140 | 2019-10-10T17:05:37.000Z | 2022-03-31T17:31:01.000Z | captum/attr/_utils/attribution.py | i-jones/captum | 567ec6fc67ab85ce07d075b25428be22bb65e31b | [
"BSD-3-Clause"
] | 758 | 2019-10-11T18:01:04.000Z | 2022-03-31T21:36:07.000Z | captum/attr/_utils/attribution.py | i-jones/captum | 567ec6fc67ab85ce07d075b25428be22bb65e31b | [
"BSD-3-Clause"
] | 345 | 2019-10-10T17:17:06.000Z | 2022-03-30T07:31:31.000Z | #!/usr/bin/env python3
from typing import Any, Callable, Generic, List, Tuple, Type, Union, cast
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
_run_forward,
_validate_target,
)
from captum._utils.gradient import compute_gradients
from captum._utils.typing import ModuleOrModuleList, TargetType
from captum.attr._utils.common import (
_format_input_baseline,
_sum_rows,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class Attribution:
    r"""
    Base class for all attribution algorithms. Concrete algorithms extend this
    class and must override the core `attribute` method.
    """

    def __init__(self, forward_func: Callable) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): This can either be an instance
                        of pytorch model or any modification of model's forward
                        function.
        """
        self.forward_func = forward_func

    attribute: Callable
    r"""
    Computes and returns the attribution values for each input tensor.
    Deriving classes are responsible for implementing its logic, and specific
    algorithms take additional relevant arguments.

    Args:

        inputs (tensor or tuple of tensors): Input for which attribution
                    is computed, provided as a single tensor or a tuple of
                    tensors. If multiple input tensors are provided, their
                    batch sizes must be aligned.

    Returns:

        *tensor* or tuple of *tensors* of **attributions**:
        - **attributions** (*tensor* or tuple of *tensors*):
                    Attribution values for each input tensor, with the same
                    shape and dimensionality as the inputs. A single tensor
                    is returned for a single input, and a tuple of
                    corresponding sized tensors for a tuple of inputs.
    """

    @property
    def multiplies_by_inputs(self):
        # Base algorithms do not scale attributions by the inputs; deriving
        # classes that do so override this property.
        return False

    def has_convergence_delta(self) -> bool:
        r"""
        Reports whether this algorithm provides a convergence delta (aka an
        approximation error), which may serve as a proxy for the correctness
        of the algorithm's approximation. A deriving class that implements
        `compute_convergence_delta` should override both that method and
        this one.

        Returns:
            bool:
            Returns whether the attribution algorithm
            provides a convergence delta (aka approximation error) or not.
        """
        return False

    compute_convergence_delta: Callable
    r"""
    Implemented by deriving algorithms that provide a convergence delta
    (aka approximation error); the delta is computed from properties of the
    particular attribution algorithm.

    Args:

        attributions (tensor or tuple of tensors): Attribution scores
                    precomputed by an attribution algorithm, as a single
                    tensor or a tuple of tensors. Dimension 0 of each
                    attribution tensor corresponds to the number of examples,
                    and multiple input tensors must be aligned accordingly.
        *args (optional): Additional arguments used by sub-classes depending
                    on their specific implementation
                    of `compute_convergence_delta`.

    Returns:

        *tensor* of **deltas**:
        - **deltas** (*tensor*):
                    Depending on the sub-class, the convergence delta is
                    returned per sample as a tensor, or aggregated across
                    multiple samples into a single floating point tensor.
    """

    @classmethod
    def get_name(cls: Type["Attribution"]) -> str:
        r"""
        Create readable class name by inserting a space before any capital
        characters besides the very first.

        Returns:
            str: a readable class name

        Example:
            for a class called IntegratedGradients, we return the string
            'Integrated Gradients'
        """
        pieces = []
        for position, char in enumerate(cls.__name__):
            # A space precedes every non-lowercase character except the first.
            if position > 0 and not char.islower():
                pieces.append(" ")
            pieces.append(char)
        return "".join(pieces)
class GradientAttribution(Attribution):
    r"""
    All gradient based attribution algorithms extend this class. It requires a
    forward function, which most commonly is the forward function of the model
    that we want to interpret or the model itself.
    """

    def __init__(self, forward_func: Callable) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): This can either be an instance
                        of pytorch model or any modification of model's forward
                        function.
        """
        Attribution.__init__(self, forward_func)
        # Default gradient helper; presumably replaced by sub-classes that
        # need a custom gradient computation — TODO confirm against callers.
        self.gradient_func = compute_gradients

    @log_usage()
    def compute_convergence_delta(
        self,
        attributions: Union[Tensor, Tuple[Tensor, ...]],
        start_point: Union[
            None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
        ],
        end_point: Union[Tensor, Tuple[Tensor, ...]],
        target: TargetType = None,
        additional_forward_args: Any = None,
    ) -> Tensor:
        r"""
        Here we provide a specific implementation for `compute_convergence_delta`
        which is based on a common property among gradient-based attribution algorithms.
        In the literature sometimes it is also called completeness axiom. Completeness
        axiom states that the sum of the attribution must be equal to the differences of
        NN Models's function at its end and start points. In other words:
        sum(attributions) - (F(end_point) - F(start_point)) is close to zero.
        Returned delta of this method is defined as above stated difference.

        This implementation assumes that both the `start_point` and `end_point` have
        the same shape and dimensionality. It also assumes that the target must have
        the same number of examples as the `start_point` and the `end_point` in case
        it is provided in form of a list or a non-singleton tensor.

        Args:

                attributions (tensor or tuple of tensors): Precomputed attribution
                            scores. The user can compute those using any attribution
                            algorithm. It is assumed that the shape and the
                            dimensionality of attributions must match the shape and
                            the dimensionality of `start_point` and `end_point`.
                            It also assumes that the attribution tensor's
                            dimension 0 corresponds to the number of
                            examples, and if multiple input tensors are provided,
                            the examples must be aligned appropriately.
                start_point (tensor or tuple of tensors, optional): `start_point`
                            is passed as an input to model's forward function. It
                            is the starting point of attributions' approximation.
                            It is assumed that both `start_point` and `end_point`
                            have the same shape and dimensionality.
                end_point (tensor or tuple of tensors): `end_point`
                            is passed as an input to model's forward function. It
                            is the end point of attributions' approximation.
                            It is assumed that both `start_point` and `end_point`
                            have the same shape and dimensionality.
                target (int, tuple, tensor or list, optional): Output indices for
                            which gradients are computed (for classification cases,
                            this is usually the target class).
                            If the network returns a scalar value per example,
                            no target index is necessary.
                            For general 2D outputs, targets can be either:

                            - a single integer or a tensor containing a single
                              integer, which is applied to all input examples

                            - a list of integers or a 1D tensor, with length matching
                              the number of examples in inputs (dim 0). Each integer
                              is applied as the target for the corresponding example.

                            For outputs with > 2 dimensions, targets can be either:

                            - A single tuple, which contains #output_dims - 1
                              elements. This target index is applied to all examples.

                            - A list of tuples with length equal to the number of
                              examples in inputs (dim 0), and each tuple containing
                              #output_dims - 1 elements. Each tuple is applied as the
                              target for the corresponding example.

                            Default: None
                additional_forward_args (any, optional): If the forward function
                            requires additional arguments other than the inputs for
                            which attributions should not be computed, this argument
                            can be provided. It must be either a single additional
                            argument of a Tensor or arbitrary (non-tuple) type or a
                            tuple containing multiple additional arguments including
                            tensors or any arbitrary python types. These arguments
                            are provided to forward_func in order following the
                            arguments in inputs.
                            For a tensor, the first dimension of the tensor must
                            correspond to the number of examples.
                            `additional_forward_args` is used both for `start_point`
                            and `end_point` when computing the forward pass.
                            Default: None

        Returns:

                *tensor* of **deltas**:
                - **deltas** (*tensor*):
                    This implementation returns convergence delta per
                    sample. Deriving sub-classes may do any type of aggregation
                    of those values, if necessary.
        """
        # Normalize inputs/baselines into tuple form (single tensors wrapped).
        end_point, start_point = _format_input_baseline(end_point, start_point)
        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # tensorizing start_point in case it is a scalar or one example baseline
        # If the batch size is large we could potentially also tensorize only one
        # sample and expand the output to the rest of the elements in the batch
        start_point = _tensorize_baseline(end_point, start_point)

        attributions = _format_tensor_into_tuples(attributions)

        # verify that the attributions and end_point match on 1st dimension
        for attribution, end_point_tnsr in zip(attributions, end_point):
            assert end_point_tnsr.shape[0] == attribution.shape[0], (
                "Attributions tensor and the end_point must match on the first"
                " dimension but found attribution: {} and end_point: {}".format(
                    attribution.shape[0], end_point_tnsr.shape[0]
                )
            )

        num_samples = end_point[0].shape[0]
        _validate_input(end_point, start_point)
        _validate_target(num_samples, target)

        # Gradients are unnecessary here: only forward outputs and the
        # precomputed attributions enter the delta.
        with torch.no_grad():
            # F(start_point) summed to a scalar per example.
            start_out_sum = _sum_rows(
                _run_forward(
                    self.forward_func, start_point, target, additional_forward_args
                )
            )

            # F(end_point) summed to a scalar per example.
            end_out_sum = _sum_rows(
                _run_forward(
                    self.forward_func, end_point, target, additional_forward_args
                )
            )
            # Sum each attribution tensor over its non-batch dimensions, then
            # add across input tensors to get one total per example.
            row_sums = [_sum_rows(attribution) for attribution in attributions]
            attr_sum = torch.stack(
                [cast(Tensor, sum(row_sum)) for row_sum in zip(*row_sums)]
            )
            # Completeness residual: sum(attributions) - (F(end) - F(start)).
            _delta = attr_sum - (end_out_sum - start_out_sum)
        return _delta
class PerturbationAttribution(Attribution):
    r"""
    Base class for all perturbation based attribution algorithms. It requires
    a forward function, which most commonly is the forward function of the
    model that we want to interpret, or the model itself.
    """

    def __init__(self, forward_func: Callable) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): This can either be an instance
                        of pytorch model or any modification of model's forward
                        function.
        """
        Attribution.__init__(self, forward_func)

    @property
    def multiplies_by_inputs(self):
        # Perturbation-based attributions are reported in input units.
        return True
class InternalAttribution(Attribution, Generic[ModuleOrModuleList]):
    r"""
    Shared base class for LayerAttribution and NeuronAttribution,
    attribution types that require a model and a particular layer.
    """
    # NOTE: this docstring previously appeared *after* the `layer` annotation.
    # An annotation is a statement, so the string was never bound as the
    # class __doc__ (PEP 257: the docstring must be the first statement);
    # it is moved up here so help()/`__doc__` report it.

    layer: ModuleOrModuleList

    def __init__(
        self,
        forward_func: Callable,
        layer: ModuleOrModuleList,
        device_ids: Union[None, List[int]] = None,
    ) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): This can either be an instance
                        of pytorch model or any modification of model's forward
                        function.
            layer (torch.nn.Module): Layer for which output attributions are computed.
                        Output size of attribute matches that of layer output.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                        applies a DataParallel model, which allows reconstruction of
                        intermediate outputs from batched results across devices.
                        If forward_func is given as the DataParallel model itself,
                        then it is not necessary to provide this argument.
        """
        Attribution.__init__(self, forward_func)
        self.layer = layer
        self.device_ids = device_ids
class LayerAttribution(InternalAttribution):
    r"""
    Layer attribution provides attribution values for the given layer,
    quantifying the importance of each neuron within the layer's output.
    The output attribution of calling attribute on a LayerAttribution object
    always matches the size of the layer output.
    """

    def __init__(
        self,
        forward_func: Callable,
        layer: ModuleOrModuleList,
        device_ids: Union[None, List[int]] = None,
    ) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): This can either be an instance
                        of pytorch model or any modification of model's forward
                        function.
            layer (torch.nn.Module): Layer for which output attributions are computed.
                        Output size of attribute matches that of layer output.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                        applies a DataParallel model, which allows reconstruction of
                        intermediate outputs from batched results across devices.
                        If forward_func is given as the DataParallel model itself,
                        then it is not necessary to provide this argument.
        """
        InternalAttribution.__init__(self, forward_func, layer, device_ids)

    @staticmethod
    def interpolate(
        layer_attribution: Tensor,
        interpolate_dims: Union[int, Tuple[int, ...]],
        interpolate_mode: str = "nearest",
    ) -> Tensor:
        r"""
        Interpolates given 3D, 4D or 5D layer attribution to given dimensions,
        often used to upsample a convolutional layer's attribution to the
        input size so it can be visualized in the input space.

        Args:
            layer_attribution (torch.Tensor): Tensor of given layer attributions.
            interpolate_dims (int or tuple): Upsampled dimensions. The number of
                        elements must be the number of dimensions of
                        layer_attribution - 2, since the first dimension
                        corresponds to number of examples and the second is
                        assumed to correspond to the number of channels.
            interpolate_mode (str): Method for interpolation, which must be a
                        valid input interpolation mode for torch.nn.functional:
                        "nearest", "area", "linear" (3D-only), "bilinear"
                        (4D-only), "bicubic" (4D-only), "trilinear" (5D-only),
                        based on the number of dimensions of the given layer
                        attribution.

        Returns:
            *tensor* of upsampled **attributions**:
            - **attributions** (*tensor*):
                        Upsampled layer attributions with first 2 dimensions
                        matching layer_attribution and remaining dimensions
                        given by interpolate_dims.
        """
        upsampled = F.interpolate(
            layer_attribution, interpolate_dims, mode=interpolate_mode
        )
        return upsampled
class NeuronAttribution(InternalAttribution):
    r"""
    Base class for attribution methods that explain a single neuron.

    Neuron attribution quantifies how much each input feature contributes to
    the activation of one particular neuron. When calling ``attribute`` on a
    NeuronAttribution object, the index of the target neuron within the
    output of the chosen layer must be supplied in addition to the inputs.

    The attributions returned by ``attribute`` always have the same size as
    the model input.
    """

    def __init__(
        self,
        forward_func: Callable,
        layer: Module,
        device_ids: Union[None, List[int]] = None,
    ) -> None:
        r"""
        Args:
            forward_func (callable or torch.nn.Module): Either an instance of
                        a pytorch model or any modification of the model's
                        forward function.
            layer (torch.nn.Module): Layer whose output contains the neuron
                        of interest. The output size of attribute matches
                        that of the layer output.
            device_ids (list(int)): List of device IDs; only required when
                        forward_func applies a DataParallel model, in which
                        case it allows intermediate outputs from batched
                        results to be reconstructed across devices. Not
                        needed when forward_func is the DataParallel model
                        itself.
        """
        # Delegate storage of the forward function, target layer and device
        # list to the common InternalAttribution base.
        InternalAttribution.__init__(self, forward_func, layer, device_ids)

    attribute: Callable
    r"""
    Computes and returns the attribution of each input element toward the
    selected neuron. Deriving classes are responsible for implementing the
    actual logic, and specific algorithms extending this class may take
    additional relevant arguments.

    Args:
        inputs: A single high dimensional input tensor or a tuple of them.
        neuron_selector (int or tuple): Index of the target neuron within the
                    output of the given layer. The tuple must have one entry
                    fewer than the number of dimensions of the layer output
                    (dimension 0 corresponds to the number of examples).

    Returns:
        *tensor* or tuple of *tensors* of **attributions**:
        - **attributions** (*tensor* or tuple of *tensors*):
                    Attribution values for each input vector, with the same
                    dimensionality as the inputs.
    """
| 44.194561 | 88 | 0.605207 |
79549aab0986cc7c0a85e9ec69910877b85d44ab | 47,971 | py | Python | python/src/vmaf/core/train_test_model.py | mymatin/vmaf | d785a9ab8999e12c7d87d4bd20a89d9a2b95b74d | [
"Apache-2.0"
] | 1 | 2018-12-27T17:12:05.000Z | 2018-12-27T17:12:05.000Z | python/src/vmaf/core/train_test_model.py | mymatin/vmaf | d785a9ab8999e12c7d87d4bd20a89d9a2b95b74d | [
"Apache-2.0"
] | 1 | 2018-07-20T00:03:48.000Z | 2018-07-20T00:03:48.000Z | python/src/vmaf/core/train_test_model.py | mymatin/vmaf | d785a9ab8999e12c7d87d4bd20a89d9a2b95b74d | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
import os
import pickle
from numbers import Number
from sklearn.metrics import f1_score
import numpy as np
from vmaf.tools.decorator import deprecated
from vmaf.tools.misc import indices
from vmaf.core.mixin import TypeVersionEnabled
from vmaf.core.perf_metric import RmsePerfMetric, SrccPerfMetric, PccPerfMetric, \
KendallPerfMetric, AucPerfMetric, ResolvingPowerPerfMetric
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
class RegressorMixin(object):
    """Mixin adding regression-oriented statistics, plot helpers and
    objective scoring to a TrainTestModel."""

    @classmethod
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):
        """Compute prediction performance statistics.

        :param ys_label: ground-truth labels; must not contain None
        :param ys_label_pred: predicted labels; must not contain None
        :param kwargs: optionally 'ys_label_raw' (raw subjective scores, used
            to additionally compute AUC and resolving power) and
            'ys_label_stddev' (label standard deviations, carried through for
            plotting).
        :return: dict with keys RMSE, SRCC, PCC, KENDALL, ys_label,
            ys_label_pred, and optionally the AUC / ResPow fields.
        """
        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)
        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']
        # spearman
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']
        # pearson
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']
        # kendall
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']
        stats = {'RMSE': rmse,
                 'SRCC': srcc,
                 'PCC': pcc,
                 'KENDALL': kendall,
                 'ys_label': list(ys_label),
                 'ys_label_pred': list(ys_label_pred)}
        ys_label_raw = kwargs['ys_label_raw'] if 'ys_label_raw' in kwargs else None
        if ys_label_raw is not None:
            try:
                # AUC
                result = AucPerfMetric(ys_label_raw, ys_label_pred).evaluate()
                stats['AUC_DS'] = result['AUC_DS']
                stats['AUC_BW'] = result['AUC_BW']
            except TypeError:  # AUC would not work with dictionary-style dataset
                stats['AUC_DS'] = float('nan')
                stats['AUC_BW'] = float('nan')
            try:
                # ResPow
                respow = ResolvingPowerPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate(enable_mapping=False)['score']
                stats['ResPow'] = respow
            except TypeError:  # ResPow would not work with dictionary-style dataset
                stats['ResPow'] = float('nan')
            try:
                # ResPow
                respow_norm = ResolvingPowerPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate(enable_mapping=True)['score']
                stats['ResPowNormalized'] = respow_norm
            except TypeError:  # ResPow would not work with dictionary-style dataset
                stats['ResPowNormalized'] = float('nan')
        # fix: original condition included a redundant always-true string
        # literal ('ys_label_stddev') as a middle operand
        if 'ys_label_stddev' in kwargs and kwargs['ys_label_stddev'] is not None:
            stats['ys_label_stddev'] = kwargs['ys_label_stddev']
        return stats

    @staticmethod
    def format_stats_for_plot(stats):
        """Summary string of stats for plot annotation (may span two lines)."""
        if stats is None:
            return '(Invalid Stats)'
        else:
            if 'AUC_DS' in stats and 'AUC_BW' in stats and 'ResPow' in stats and 'ResPowNormalized' in stats:
                return '(SRCC: {srcc:.3f}, PCC: {pcc:.3f}, RMSE: {rmse:.3f},\n AUC: {auc_ds:.3f}/{auc_bw:.3f}, ' \
                       'ResPow: {respow:.3f}/{respownorm:.3f})'. \
                    format(srcc=stats['SRCC'], pcc=stats['PCC'], rmse=stats['RMSE'],
                           auc_ds=stats['AUC_DS'], auc_bw=stats['AUC_BW'],
                           respow=stats['ResPow'], respownorm=stats['ResPowNormalized'])
            else:
                return '(SRCC: {srcc:.3f}, PCC: {pcc:.3f}, RMSE: {rmse:.3f})'. \
                    format(srcc=stats['SRCC'], pcc=stats['PCC'], rmse=stats['RMSE'])

    @staticmethod
    def format_stats_for_print(stats):
        """Single-line summary string of stats for console printing."""
        if stats is None:
            return '(Invalid Stats)'
        else:
            if 'AUC_DS' in stats and 'AUC_BW' in stats and 'ResPow' in stats and 'ResPowNormalized' in stats:
                return '(SRCC: {srcc:.3f}, PCC: {pcc:.3f}, RMSE: {rmse:.3f}, AUC: {auc_ds:.3f}/{auc_bw:.3f}, ' \
                       'ResPow: {respow:.3f}/{respownorm:.3f})'. \
                    format(srcc=stats['SRCC'], pcc=stats['PCC'], rmse=stats['RMSE'],
                           auc_ds=stats['AUC_DS'], auc_bw=stats['AUC_BW'],
                           respow=stats['ResPow'], respownorm=stats['ResPowNormalized'])
            else:
                return '(SRCC: {srcc:.3f}, PCC: {pcc:.3f}, RMSE: {rmse:.3f})'. \
                    format(srcc=stats['SRCC'], pcc=stats['PCC'], rmse=stats['RMSE'])

    @staticmethod
    @deprecated
    def format_stats2(stats):
        """Deprecated multi-line summary string of stats."""
        if stats is None:
            return 'Invalid Stats'
        else:
            return 'RMSE: {rmse:.3f}\nPCC: {pcc:.3f}\nSRCC: {srcc:.3f}'.format(
                srcc=stats['SRCC'], pcc=stats['PCC'], rmse=stats['RMSE'])

    @classmethod
    def aggregate_stats_list(cls, stats_list):
        """Pool labels/predictions from multiple stats dicts and recompute
        aggregate statistics over the union."""
        aggregate_ys_label = []
        aggregate_ys_label_pred = []
        for stats in stats_list:
            aggregate_ys_label += stats['ys_label']
            aggregate_ys_label_pred += stats['ys_label_pred']
        return cls.get_stats(aggregate_ys_label, aggregate_ys_label_pred)

    @classmethod
    def plot_scatter(cls, ax, stats, **kwargs):
        """Scatter-plot predicted vs. ground-truth labels on axis ax,
        optionally color-coded per content id ('content_ids' kwarg) and
        annotated per point ('point_labels' kwarg)."""
        assert len(stats['ys_label']) == len(stats['ys_label_pred'])
        content_ids = kwargs['content_ids'] if 'content_ids' in kwargs else None
        point_labels = kwargs['point_labels'] if 'point_labels' in kwargs else None
        if content_ids is None:
            ax.scatter(stats['ys_label'], stats['ys_label_pred'])
        else:
            assert len(stats['ys_label']) == len(content_ids)
            unique_content_ids = list(set(content_ids))
            import matplotlib.pyplot as plt
            cmap = plt.get_cmap()
            colors = [cmap(i) for i in np.linspace(0, 1, len(unique_content_ids))]
            for idx, curr_content_id in enumerate(unique_content_ids):
                curr_idxs = indices(content_ids, lambda cid: cid == curr_content_id)
                curr_ys_label = np.array(stats['ys_label'])[curr_idxs]
                curr_ys_label_pred = np.array(stats['ys_label_pred'])[curr_idxs]
                try:
                    # xerr = 1.96 sigma, i.e. a 95% C.I. assuming Gaussian
                    curr_ys_label_stddev = np.array(stats['ys_label_stddev'])[curr_idxs]
                    ax.errorbar(curr_ys_label, curr_ys_label_pred,
                                xerr=1.96 * curr_ys_label_stddev,
                                marker='o', linestyle='', label=curr_content_id, color=colors[idx % len(colors)])
                except Exception:  # fix: was a bare except; best-effort fallback when stddev is unavailable
                    ax.errorbar(curr_ys_label, curr_ys_label_pred,
                                marker='o', linestyle='', label=curr_content_id, color=colors[idx % len(colors)])
        if point_labels:
            assert len(point_labels) == len(stats['ys_label'])
            for i, point_label in enumerate(point_labels):
                ax.annotate(point_label, (stats['ys_label'][i], stats['ys_label_pred'][i]))

    @staticmethod
    def get_objective_score(result, type='SRCC'):
        """
        Objective score is something to MAXIMIZE. e.g. SRCC, or -RMSE.
        :param result:
        :param type:
        :return:
        """
        if type == 'SRCC':
            return result['SRCC']
        elif type == 'PCC':
            return result['PCC']
        elif type == 'KENDALL':
            return result['KENDALL']
        elif type == 'RMSE':
            return -result['RMSE']
        else:
            assert False, 'Unknown type: {} for get_objective_score().'.format(type)
class ClassifierMixin(object):
    """Mixin adding classification-oriented statistics and objective scoring
    to a TrainTestModel."""

    @classmethod
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):
        """Compute classification performance statistics (RMSE, F1 score and
        error rate) of predictions vs. ground truth.

        :param ys_label: ground-truth labels; must not contain None
        :param ys_label_pred: predicted labels; must not contain None
        :return: dict with keys RMSE, f1, errorrate, ys_label, ys_label_pred
        """
        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)
        # RMSE
        rmse = np.sqrt(np.mean(
            np.power(np.array(ys_label) - np.array(ys_label_pred), 2.0)))
        # f1 -- sklearn's signature is f1_score(y_true, y_pred); the original
        # passed the arguments swapped. The F1 value is unchanged (F1 is
        # symmetric in precision/recall) but the order is corrected here.
        f1 = f1_score(ys_label, ys_label_pred)
        # error rate
        errorrate = np.mean(np.array(ys_label) != np.array(ys_label_pred))
        stats = {'RMSE': rmse,
                 'f1': f1,
                 'errorrate': errorrate,
                 'ys_label': list(ys_label),
                 'ys_label_pred': list(ys_label_pred)}
        return stats

    @staticmethod
    def format_stats(stats):
        """Single-line summary string of stats for display."""
        if stats is None:
            return '(Invalid Stats)'
        else:
            return '(F1: {f1:.3f}, Error: {err:.3f}, RMSE: {rmse:.3f})'.format(
                f1=stats['f1'], err=stats['errorrate'], rmse=stats['RMSE'])

    @staticmethod
    def format_stats2(stats):
        """Multi-line summary string of stats for display."""
        if stats is None:
            return 'Invalid Stats'
        else:
            return 'RMSE: {rmse:.3f}\nF1: {f1:.3f}\nError: {err:.3f}'.format(
                f1=stats['f1'], err=stats['errorrate'], rmse=stats['RMSE'])

    @classmethod
    def aggregate_stats_list(cls, stats_list):
        """Pool labels/predictions from multiple stats dicts and recompute
        aggregate statistics over the union."""
        aggregate_ys_label = []
        aggregate_ys_label_pred = []
        for stats in stats_list:
            aggregate_ys_label += stats['ys_label']
            aggregate_ys_label_pred += stats['ys_label_pred']
        return cls.get_stats(aggregate_ys_label, aggregate_ys_label_pred)

    @staticmethod
    def get_objective_score(result, type='RMSE'):
        """
        Objective score is something to MAXIMIZE. e.g. f1, or -errorrate, or -RMSE.
        :param result:
        :param type:
        :return:
        """
        if type == 'f1':
            return result['f1']
        elif type == 'errorrate':
            return -result['errorrate']
        elif type == 'RMSE':
            return -result['RMSE']
        else:
            assert False, 'Unknown type: {} for get_objective_score().'.format(type)
class TrainTestModel(TypeVersionEnabled):
    """Abstract base class for trainable quality-prediction models.

    Handles feature tabulation, normalization, (de)serialization to file and
    evaluation; concrete subclasses implement _train() and _predict().
    """

    __metaclass__ = ABCMeta

    @classmethod
    @abstractmethod
    def _train(cls, param_dict, xys_2d):
        # Train on a 2D array whose column 0 is the label and remaining
        # columns are features; return the fitted model object.
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def _predict(cls, model, xs_2d):
        # Predict labels from a 2D feature array with the fitted model.
        raise NotImplementedError

    def __init__(self, param_dict, logger=None, optional_dict2=None):
        '''
        Put in optional_dict2 optionals that would not impact result, e.g.
        path to checkpoint file directories, or h5py file
        '''
        TypeVersionEnabled.__init__(self)
        self.param_dict = param_dict
        self.logger = logger
        self.optional_dict2 = optional_dict2
        self.model_dict = {}
        self._assert_args()

    def _assert_args(self):
        # hook for subclasses to validate param_dict / optional_dict2
        pass

    @property
    def model_id(self):
        return TypeVersionEnabled.get_type_version_string(self)

    def _assert_trained(self):
        # sanity-check that model_dict holds everything a trained model needs
        assert 'model_type' in self.model_dict  # need this to recover class
        assert 'feature_names' in self.model_dict
        assert 'norm_type' in self.model_dict
        assert 'model' in self.model_dict
        norm_type = self.model_dict['norm_type']
        assert norm_type == 'none' or norm_type == 'linear_rescale'
        if norm_type == 'linear_rescale':
            assert 'slopes' in self.model_dict
            assert 'intercepts' in self.model_dict

    def append_info(self, key, value):
        """
        Useful for adding extra info to model before saving. For example,
        save feature_dict to model so that when the model is loaded by a
        QualityRunner, it knows when features to extract.
        """
        self.model_dict[key] = value

    def get_appended_info(self, key):
        """
        Retrieve info added via the append_info method.
        """
        return self.model_dict[key] if key in self.model_dict else None

    @property
    def feature_names(self):
        self._assert_trained()
        return self.model_dict['feature_names']

    @feature_names.setter
    def feature_names(self, value):
        self.model_dict['feature_names'] = value

    @property
    def model_type(self):
        return self.model_dict['model_type']

    @model_type.setter
    def model_type(self, value):
        self.model_dict['model_type'] = value

    @property
    def norm_type(self):
        return self.model_dict['norm_type']

    @norm_type.setter
    def norm_type(self, value):
        self.model_dict['norm_type'] = value

    @property
    def mus(self):
        return np.array(self.model_dict['mus'])

    @mus.setter
    def mus(self, value):
        # forcing float, to be used by PicklingTools and read in C++
        self.model_dict['mus'] = map(lambda x: float(x), list(value))

    @property
    def sds(self):
        return np.array(self.model_dict['sds'])

    @sds.setter
    def sds(self, value):
        # forcing float, to be used by PicklingTools and read in C++
        self.model_dict['sds'] = map(lambda x: float(x), list(value))

    @property
    def slopes(self):
        return np.array(self.model_dict['slopes'])

    @slopes.setter
    def slopes(self, value):
        # forcing float, to be used by PicklingTools and read in C++
        self.model_dict['slopes'] = map(lambda x: float(x), list(value))

    @property
    def intercepts(self):
        return np.array(self.model_dict['intercepts'])

    @intercepts.setter
    def intercepts(self, value):
        # forcing float, to be used by PicklingTools and read in C++
        self.model_dict['intercepts'] = map(lambda x: float(x), list(value))

    @property
    def model(self):
        return self.model_dict['model']

    @model.setter
    def model(self, value):
        self.model_dict['model'] = value

    def to_file(self, filename):
        """Serialize the trained model (param_dict + model_dict) to file."""
        self._assert_trained()
        param_dict = self.param_dict
        model_dict = self.model_dict
        self._to_file(filename, param_dict, model_dict)

    @staticmethod
    def _to_file(filename, param_dict, model_dict):
        info_to_save = {'param_dict': param_dict,
                        'model_dict': model_dict}
        with open(filename, 'wb') as file:
            pickle.dump(info_to_save, file)

    @classmethod
    def from_file(cls, filename, logger=None, optional_dict2=None):
        """Load a model from file, dispatching to the subclass recorded in
        the pickled model_type."""
        assert os.path.exists(filename), 'File name {} does not exist.'.format(filename)
        with open(filename, 'rb') as file:
            info_loaded = pickle.load(file)
        model_type = info_loaded['model_dict']['model_type']
        model_class = TrainTestModel.find_subclass(model_type)
        if model_class == cls:
            train_test_model = model_class._from_info_loaded(info_loaded, filename,
                                                             logger, optional_dict2)
        else:
            # the newly found model_class can be a different class (e.g. a subclass of cls). In this
            # case, call from_file() of that model_class.
            train_test_model = model_class.from_file(filename, logger, optional_dict2)
        return train_test_model

    @classmethod
    def _from_info_loaded(cls, info_loaded, filename, logger, optional_dict2):
        train_test_model = cls(
            param_dict={}, logger=logger, optional_dict2=optional_dict2)
        train_test_model.param_dict = info_loaded['param_dict']
        train_test_model.model_dict = info_loaded['model_dict']
        return train_test_model

    def _preproc_train(self, xys):
        """Tabularize and normalize training data; returns a 2D array whose
        column 0 is the label."""
        self.model_type = self.TYPE
        assert 'label' in xys
        assert 'content_id' in xys
        feature_names = self.get_ordered_feature_names(xys)
        self.feature_names = feature_names
        # note that feature_names is property (write). below cannot yet use
        # self.feature_names since additional things (_assert_trained()) is
        # not ready yet
        xys_2d = self._to_tabular_xys(feature_names, xys)
        # calculate normalization parameters,
        self._calculate_normalization_params(xys_2d)
        # normalize
        xys_2d = self._normalize_xys(xys_2d)
        return xys_2d

    def train(self, xys):
        """Train the model on a dict of features plus 'label'/'content_id'."""
        xys_2d = self._preproc_train(xys)
        model = self._train(self.param_dict, xys_2d)
        self.model = model

    @staticmethod
    def get_ordered_feature_names(xys_or_xs):
        # this makes sure the order of features are normalized, and each
        # dimension of xys_2d (or xs_2d) is consistent with feature_names
        feature_names = sorted(xys_or_xs.keys())
        if 'label' in feature_names:
            feature_names.remove('label')
        if 'content_id' in feature_names:
            feature_names.remove('content_id')
        return feature_names

    def _calculate_normalization_params(self, xys_2d):
        """Derive per-column slope/intercept for the configured norm_type;
        the stored norm_type collapses to 'linear_rescale' or 'none'."""
        norm_type = self.param_dict['norm_type'] \
            if 'norm_type' in self.param_dict else 'none'
        if norm_type == 'normalize':
            mus = np.mean(xys_2d, axis=0)
            sds = np.std(xys_2d, axis=0)
            self.slopes = 1.0 / sds
            self.intercepts = - mus / sds
            self.norm_type = 'linear_rescale'
        elif norm_type == 'clip_0to1':
            self._calculate_normalization_params_clip_0to1(xys_2d)
        elif norm_type == 'custom_clip_0to1':
            self._calculate_normalization_params_custom_clip_0to1(xys_2d)
        elif norm_type == 'clip_minus1to1':
            ub = 1.0
            lb = -1.0
            fmins = np.min(xys_2d, axis=0)
            fmaxs = np.max(xys_2d, axis=0)
            self.slopes = (ub - lb) / (fmaxs - fmins)
            self.intercepts = (lb*fmaxs - ub*fmins) / (fmaxs - fmins)
            self.norm_type = 'linear_rescale'
        elif norm_type == 'none':
            self.norm_type = 'none'
        else:
            assert False, 'Incorrect parameter norm type selected: {}' \
                .format(self.param_dict['norm_type'])

    def _calculate_normalization_params_clip_0to1(self, xys_2d):
        # linearly map the observed [min, max] of each column to [0, 1]
        ub = 1.0
        lb = 0.0
        fmins = np.min(xys_2d, axis=0)
        fmaxs = np.max(xys_2d, axis=0)
        self.slopes = (ub - lb) / (fmaxs - fmins)
        self.intercepts = (lb * fmaxs - ub * fmins) / (fmaxs - fmins)
        self.norm_type = 'linear_rescale'

    def _calculate_normalization_params_custom_clip_0to1(self, xys_2d):
        # linearly map the range specified to [0, 1]; if unspecified, use clip_0to1
        ub = 1.0
        lb = 0.0
        fmins = np.min(xys_2d, axis=0)
        fmaxs = np.max(xys_2d, axis=0)
        if 'custom_clip_0to1_map' in self.param_dict:
            custom_map = self.param_dict['custom_clip_0to1_map']
            features = self.model_dict['feature_names']
            for feature in custom_map:
                if feature in features:
                    fmin, fmax = custom_map[feature]
                    idx = features.index(feature)
                    assert len(fmins) == len(features) + 1  # fmins[0] is for y
                    # fix: originally asserted len(fmins) twice; the second
                    # assert is meant to check fmaxs (see trailing comment)
                    assert len(fmaxs) == len(features) + 1  # fmaxs[0] is for y
                    fmins[idx + 1] = fmin
                    fmaxs[idx + 1] = fmax
        self.slopes = (ub - lb) / (fmaxs - fmins)
        self.intercepts = (lb * fmaxs - ub * fmins) / (fmaxs - fmins)
        self.norm_type = 'linear_rescale'

    def _normalize_xys(self, xys_2d):
        if self.norm_type == 'linear_rescale':
            xys_2d = self.slopes * xys_2d + self.intercepts
        elif self.norm_type == 'none':
            pass
        else:
            assert False, 'Incorrect model norm type selected: {}' \
                .format(self.norm_type)
        return xys_2d

    def denormalize_ys(self, ys_vec):
        # inverse of the label column's linear rescale
        if self.norm_type == 'linear_rescale':
            ys_vec = (ys_vec - self.intercepts[0]) / self.slopes[0]
        elif self.norm_type == 'none':
            pass
        else:
            assert False, 'Incorrect model norm type selected: {}' \
                .format(self.norm_type)
        return ys_vec

    def normalize_xs(self, xs_2d):
        # slopes/intercepts index 0 belongs to the label; features start at 1
        if self.norm_type == 'linear_rescale':
            xs_2d = self.slopes[1:] * xs_2d + self.intercepts[1:]
        elif self.norm_type == 'none':
            pass
        else:
            assert False, 'Incorrect model norm type selected: {}' \
                .format(self.norm_type)
        return xs_2d

    def _preproc_predict(self, xs):
        """Tabularize and normalize prediction inputs with the trained
        feature order and normalization parameters."""
        self._assert_trained()
        feature_names = self.feature_names
        for name in feature_names:
            assert name in xs
        xs_2d = self._to_tabular_xs(feature_names, xs)
        # normalize xs
        xs_2d = self.normalize_xs(xs_2d)
        return xs_2d

    def predict(self, xs):
        """Predict (denormalized) labels for a dict of features.

        :return: dict with key 'ys_label_pred'
        """
        xs_2d = self._preproc_predict(xs)
        ys_label_pred = self._predict(self.model, xs_2d)
        ys_label_pred = self.denormalize_ys(ys_label_pred)
        return {'ys_label_pred': ys_label_pred}

    @classmethod
    def _to_tabular_xys(cls, xkeys, xys):
        # stack features column-wise in xkeys order, then prepend the labels
        xs_2d = None
        for name in xkeys:
            if xs_2d is None:
                xs_2d = np.matrix(xys[name]).T
            else:
                xs_2d = np.hstack((xs_2d, np.matrix(xys[name]).T))
        # combine them
        ys_vec = xys['label']
        xys_2d = np.array(np.hstack((np.matrix(ys_vec).T, xs_2d)))
        return xys_2d

    @classmethod
    def _to_tabular_xs(cls, xkeys, xs):
        # stack features column-wise in xkeys order
        xs_2d = []
        for name in xkeys:
            xs_2d.append(np.array(xs[name]))
        xs_2d = np.vstack(xs_2d).T
        return xs_2d

    def evaluate(self, xs, ys):
        """Predict on xs and score against ys['label'] via get_stats()."""
        ys_label_pred = self.predict(xs)['ys_label_pred']
        ys_label = ys['label']
        stats = self.get_stats(ys_label, ys_label_pred)
        return stats

    @classmethod
    def delete(cls, filename):
        """Remove a saved model file (if it exists)."""
        cls._delete(filename)

    @staticmethod
    def _delete(filename):
        if os.path.exists(filename):
            os.remove(filename)

    @classmethod
    def get_xs_from_results(cls, results, indexs=None, aggregate=True):
        """
        :param results: list of BasicResult, or pandas.DataFrame
        :param indexs: indices of results to be used
        :param aggregate: if True, return aggregate score, otherwise per-frame/per-block
        """
        try:
            if aggregate:
                feature_names = results[0].get_ordered_list_score_key()
            else:
                feature_names = results[0].get_ordered_list_scores_key()
        except AttributeError:
            # if RawResult, will not have either get_ordered_list_score_key
            # or get_ordered_list_scores_key. Instead, just get the sorted keys
            feature_names = results[0].get_ordered_results()
        cls._assert_dimension(feature_names, results)
        # collect results into xs
        xs = {}
        for name in feature_names:
            if indexs is not None:
                _results = map(lambda i: results[i], indexs)
            else:
                _results = results
            xs[name] = map(lambda result: result[name], _results)
        return xs

    @classmethod
    def _assert_dimension(cls, feature_names, results):
        # by default, only accept result[feature_name] that is a scalar
        for name in feature_names:
            for result in results:
                assert isinstance(result[name], Number)

    @staticmethod
    def get_per_unit_xs_from_a_result(result):
        """
        Similar to get_xs_from_results(), except that instead of intake a list
        of Result, each corresponding to an aggregate score, this function takes
        a single Result, and interpret its per-frame score as an aggregate score.
        :param result: one BasicResult
        """
        # need to substitute the score key (e.g. motion_score -> motion_scores)
        # to ensure compatibility
        feature_names = result.get_ordered_list_scores_key()
        new_feature_names = result.get_ordered_list_score_key()
        xs = {}
        for name, new_name in zip(feature_names, new_feature_names):
            xs[new_name] = np.array(result[name])
        return xs

    @classmethod
    def get_ys_from_results(cls, results, indexs=None):
        """
        :param results: list of BasicResult, or pandas.DataFrame
        :param indexs: indices of results to be used
        """
        ys = {}
        if indexs is not None:
            _results = map(lambda i: results[i], indexs)
        else:
            _results = results
        ys['label'] = \
            np.array(map(lambda result: result.asset.groundtruth, _results))
        ys['content_id'] = \
            np.array(map(lambda result: result.asset.content_id, _results))
        return ys

    @classmethod
    def get_xys_from_results(cls, results, indexs=None, aggregate=True):
        """
        :param results: list of BasicResult, or pandas.DataFrame
        :param indexs: indices of results to be used
        """
        xys = {}
        xys.update(cls.get_xs_from_results(results, indexs, aggregate))
        xys.update(cls.get_ys_from_results(results, indexs))
        return xys

    @classmethod
    def reset(cls):
        # placeholder for adding any reset mechanism to avoid interference
        # between experiments
        pass
class LibsvmNusvrTrainTestModel(TrainTestModel, RegressorMixin):
    # nu-SVR regressor backed by libsvm. The libsvm model object cannot be
    # pickled, so it is saved/loaded in a sibling '<filename>.model' file.

    TYPE = 'LIBSVMNUSVR'
    VERSION = "0.1"

    @classmethod
    def _train(cls, model_param, xys_2d):
        """
        Train a libsvm nu-SVR model.
        :param model_param: dict of hyperparameters; recognized keys are
            'kernel', 'gamma', 'C', 'nu', 'cache_size' (libsvm defaults used
            when absent)
        :param xys_2d: 2D array; column 0 is the label, remaining columns
            are features
        :return: fitted libsvm model object
        """
        kernel = model_param['kernel'] if 'kernel' in model_param else 'rbf'
        gamma = model_param['gamma'] if 'gamma' in model_param else 0.0
        C = model_param['C'] if 'C' in model_param else 1.0
        nu = model_param['nu'] if 'nu' in model_param else 0.5
        cache_size = model_param['cache_size'] if 'cache_size' in model_param else 200
        # lazy import: bind svmutil locally only if not already available
        try:
            svmutil
        except NameError:
            from vmaf import svmutil
        # map kernel name to libsvm's integer kernel type
        if kernel == 'rbf':
            ktype_int = svmutil.RBF
        elif kernel == 'linear':
            ktype_int = svmutil.LINEAR
        elif kernel == 'poly':
            ktype_int = svmutil.POLY
        elif kernel == 'sigmoid':
            ktype_int = svmutil.SIGMOID
        else:
            assert False, 'ktype = ' + str(kernel) + ' not implemented'
        # '-s 4' selects the nu-SVR formulation
        param = svmutil.svm_parameter([
            '-s', 4,
            '-t', ktype_int,
            '-c', C,
            '-g', gamma,
            '-n', nu,
            '-m', cache_size
        ])
        # libsvm expects features as a list of lists
        f = list(xys_2d[:, 1:])
        for i, item in enumerate(f):
            f[i] = list(item)
        prob = svmutil.svm_problem(xys_2d[:, 0], f)
        model = svmutil.svm_train(prob, param)
        return model

    @classmethod
    def _predict(cls, model, xs_2d):
        # override TrainTestModel._predict
        try:
            svmutil
        except NameError:
            from vmaf import svmutil
        # libsvm expects features as a list of lists
        f = list(xs_2d)
        for i, item in enumerate(f):
            f[i] = list(item)
        # dummy all-zero labels: true labels are unknown at prediction time
        score, _, _ = svmutil.svm_predict([0] * len(f), f, model)
        ys_label_pred = np.array(score)
        return ys_label_pred

    @staticmethod
    def _to_file(filename, param_dict, model_dict):
        try:
            svmutil
        except NameError:
            from vmaf import svmutil
        # override TrainTestModel._to_file
        # special handling of libsvmnusvr: save .model differently
        info_to_save = {'param_dict': param_dict,
                        'model_dict': model_dict.copy()}
        # strip the unpicklable libsvm model from the pickle; it is written
        # separately via svm_save_model below
        svm_model = info_to_save['model_dict']['model']
        info_to_save['model_dict']['model'] = None
        with open(filename, 'wb') as file:
            pickle.dump(info_to_save, file)
        svmutil.svm_save_model(filename + '.model', svm_model)

    @classmethod
    def _from_info_loaded(cls, info_loaded, filename, logger, optional_dict2):
        try:
            svmutil
        except NameError:
            from vmaf import svmutil
        # override TrainTestModel._from_info_loaded
        train_test_model = cls(
            param_dict={}, logger=logger, optional_dict2=optional_dict2)
        train_test_model.param_dict = info_loaded['param_dict']
        train_test_model.model_dict = info_loaded['model_dict']
        if issubclass(cls, LibsvmNusvrTrainTestModel):
            # == special handling of libsvmnusvr: load .model differently ==
            model = svmutil.svm_load_model(filename + '.model')
            train_test_model.model_dict['model'] = model
        return train_test_model

    @classmethod
    def _delete(cls, filename):
        # override TrainTestModel._delete: also remove the sibling .model file
        if os.path.exists(filename):
            os.remove(filename)
        if os.path.exists(filename + '.model'):
            os.remove(filename + '.model')

    @classmethod
    def from_raw_file(cls, model_filename, additional_model_dict, logger):
        """
        Construct from raw libsvm model file.
        :param model_filename:
        :param additional_model_dict: must contain keys feature_names, norm_type
        and optional slopes and intercepts
        :param logger:
        :return:
        """
        try:
            svmutil
        except NameError:
            from vmaf import svmutil
        # assert additional_model_dict
        assert 'feature_names' in additional_model_dict
        assert 'norm_type' in additional_model_dict
        norm_type = additional_model_dict['norm_type']
        assert norm_type == 'none' or norm_type == 'linear_rescale'
        if norm_type == 'linear_rescale':
            assert 'slopes' in additional_model_dict
            assert 'intercepts' in additional_model_dict
        train_test_model = cls(param_dict={}, logger=logger)
        train_test_model.model_dict.update(additional_model_dict)
        model = svmutil.svm_load_model(model_filename)
        train_test_model.model_dict['model'] = model
        return train_test_model
class SklearnRandomForestTrainTestModel(TrainTestModel, RegressorMixin):
    """Regression model backed by scikit-learn's RandomForestRegressor."""

    TYPE = 'RANDOMFOREST'
    VERSION = "0.1"

    @classmethod
    def _train(cls, model_param, xys_2d):
        """
        random forest regression
        http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
        :param model_param: dict of hyperparameters; vmaf-specific keys are
            stripped, the rest is forwarded to RandomForestRegressor
        :param xys_2d: 2D array; column 0 is the label, remaining columns
            are features
        :return: fitted sklearn regressor
        """
        from sklearn import ensemble

        sklearn_param = model_param.copy()
        # strip vmaf-specific keys that sklearn does not understand
        for vmaf_key in ('norm_type', 'score_clip', 'custom_clip_0to1_map'):
            if vmaf_key in sklearn_param:
                del sklearn_param[vmaf_key]

        model = ensemble.RandomForestRegressor(**sklearn_param)
        model.fit(xys_2d[:, 1:], np.ravel(xys_2d[:, 0]))
        return model

    @classmethod
    def _predict(cls, model, xs_2d):
        # delegate directly to the fitted sklearn regressor
        return model.predict(xs_2d)
class SklearnExtraTreesTrainTestModel(TrainTestModel, RegressorMixin):
    """Regression model backed by scikit-learn's ExtraTreesRegressor."""

    TYPE = 'EXTRATREES'
    VERSION = "0.1"

    @classmethod
    def _train(cls, model_param, xys_2d):
        """
        extremely random trees
        http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
        :param model_param: dict of hyperparameters; vmaf-specific keys are
            stripped, the rest is forwarded to ExtraTreesRegressor
        :param xys_2d: 2D array; column 0 is the label, remaining columns
            are features
        :return: fitted sklearn regressor
        """
        from sklearn import ensemble

        sklearn_param = model_param.copy()
        # strip vmaf-specific keys that sklearn does not understand
        for vmaf_key in ('norm_type', 'score_clip', 'custom_clip_0to1_map'):
            if vmaf_key in sklearn_param:
                del sklearn_param[vmaf_key]

        model = ensemble.ExtraTreesRegressor(**sklearn_param)
        model.fit(xys_2d[:, 1:], np.ravel(xys_2d[:, 0]))
        return model

    @classmethod
    def _predict(cls, model, xs_2d):
        # delegate directly to the fitted sklearn regressor
        return model.predict(xs_2d)
class RawVideoTrainTestModelMixin(object):
    """
    Provides key methods for handling inputs that are RawVideoExtractor
    results.
    """

    @classmethod
    def _assert_dimension(cls, feature_names, results):
        # Overrides TrainTestModel._assert_dimension. Inputs may be numpy
        # ndarrays or equivalents (e.g. H5py objects): each must expose a
        # 'shape' attribute with exactly 3 dimensions, interpreted as
        # (frames, height, width).
        assert hasattr(results[0][feature_names[0]], 'shape')
        for feature_name in feature_names:
            for result in results:
                assert len(result[feature_name].shape) == 3
class MomentRandomForestTrainTestModel(RawVideoTrainTestModelMixin,
                                       # : order affects whose _assert_dimension
                                       # gets called
                                       SklearnRandomForestTrainTestModel,
                                       RegressorMixin,
                                       ):
    """
    Reduces each input video (a frames x height x width 3D-array) to its
    average first moment, second moment and variance, then trains a
    RandomForestTrainTestModel on those statistics.
    For demo purpose only.
    """

    TYPE = 'MOMENTRANDOMFOREST'
    VERSION = "0.1"

    @classmethod
    def _to_tabular_xys(cls, xkeys, xys):
        # Overrides TrainTestModel._to_tabular_xys: per-video moment features
        # are computed first, then the label column is prepended.
        feature_table = cls._to_tabular_xs(xkeys, xys)
        label_vec = xys['label']
        return np.array(np.hstack((np.matrix(label_vec).T, feature_table)))

    @classmethod
    def _to_tabular_xs(cls, xkeys, xs):
        # Overrides TrainTestModel._to_tabular_xs: turn each raw video into
        # its (1st moment, 2nd moment, variance) averaged over frames, laid
        # out in tabular form with one row per video.
        columns = []
        for key in sorted(xkeys):
            per_video_stats = []
            for video in xs[key]:
                nframes = video.shape[0]
                per_frame_stats = np.zeros((nframes, 3))
                for iframe, frame in enumerate(video):
                    mean = frame.mean()
                    var = frame.var()
                    # second moment = variance + mean^2
                    per_frame_stats[iframe] = (mean, var + mean ** 2, var)
                per_video_stats.append(np.mean(per_frame_stats, axis=0))
            columns.append(np.vstack(per_video_stats))
        return np.hstack(columns)
class BootstrapRegressorMixin(RegressorMixin):
    # Extends RegressorMixin with bootstrap prediction statistics: bagged
    # predictions, their stddev and 95% confidence intervals. Falls back to
    # the plain RegressorMixin behavior when those fields are absent.

    @classmethod
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):
        # override RegressionMixin.get_stats
        try:
            # all four bootstrap fields must be present; otherwise fall back
            # to the base-class statistics (handled in except below)
            assert 'ys_label_pred_bagging' in kwargs
            assert 'ys_label_pred_stddev' in kwargs
            assert 'ys_label_pred_ci95_low' in kwargs
            assert 'ys_label_pred_ci95_high' in kwargs
            stats = super(BootstrapRegressorMixin, cls).get_stats(ys_label, ys_label_pred, **kwargs)
            # carry the bootstrap fields through for plotting
            stats['ys_label_pred_bagging'] = kwargs['ys_label_pred_bagging']
            stats['ys_label_pred_stddev'] = kwargs['ys_label_pred_stddev']
            stats['ys_label_pred_ci95_low'] = kwargs['ys_label_pred_ci95_low']
            stats['ys_label_pred_ci95_high'] = kwargs['ys_label_pred_ci95_high']
            return stats
        except AssertionError:
            # bootstrap info unavailable: plain regressor statistics
            return super(BootstrapRegressorMixin, cls).get_stats(ys_label, ys_label_pred, **kwargs)

    @classmethod
    def plot_scatter(cls, ax, stats, **kwargs):
        # override RegressionMixin.plot_scatter: adds vertical error bars for
        # the predictions, from either a Gaussian approximation
        # (1.96 * stddev) or the bootstrap CI95 bounds
        assert len(stats['ys_label']) == len(stats['ys_label_pred'])
        content_ids = kwargs['content_ids'] if 'content_ids' in kwargs else None
        point_labels = kwargs['point_labels'] if 'point_labels' in kwargs else None
        try:
            # when True (default), use +/- 1.96 sigma for the 95% C.I.;
            # otherwise use the bootstrap percentile bounds
            ci_assume_gaussian = kwargs['ci_assume_gaussian'] if 'ci_assume_gaussian' in kwargs else True
            # bootstrap fields required; AssertionError falls back to base
            assert 'ys_label_pred_bagging' in stats
            assert 'ys_label_pred_stddev' in stats
            assert 'ys_label_pred_ci95_low' in stats
            assert 'ys_label_pred_ci95_high' in stats
            avg_std = np.mean(stats['ys_label_pred_stddev'])
            avg_ci95_low = np.mean(stats['ys_label_pred_ci95_low'])
            avg_ci95_high = np.mean(stats['ys_label_pred_ci95_high'])
            if content_ids is None:
                if ci_assume_gaussian:
                    yerr = 1.96 * stats['ys_label_pred_stddev'] # 95% C.I. (assume Gaussian)
                else:
                    yerr = [stats['ys_label_pred_bagging'] - avg_ci95_low, avg_ci95_high - stats['ys_label_pred_bagging']] # 95% C.I.
                ax.errorbar(stats['ys_label'], stats['ys_label_pred'],
                            yerr=yerr,
                            marker='o', linestyle='')
            else:
                # one color per content id
                assert len(stats['ys_label']) == len(content_ids)
                unique_content_ids = list(set(content_ids))
                import matplotlib.pyplot as plt
                cmap = plt.get_cmap()
                colors = [cmap(i) for i in np.linspace(0, 1, len(unique_content_ids))]
                for idx, curr_content_id in enumerate(unique_content_ids):
                    curr_idxs = indices(content_ids, lambda cid: cid == curr_content_id)
                    curr_ys_label = np.array(stats['ys_label'])[curr_idxs]
                    curr_ys_label_pred = np.array(stats['ys_label_pred'])[curr_idxs]
                    curr_ys_label_pred_bagging = np.array(stats['ys_label_pred_bagging'])[curr_idxs]
                    curr_ys_label_pred_stddev = np.array(stats['ys_label_pred_stddev'])[curr_idxs]
                    curr_ys_label_pred_ci95_low = np.array(stats['ys_label_pred_ci95_low'])[curr_idxs]
                    curr_ys_label_pred_ci95_high = np.array(stats['ys_label_pred_ci95_high'])[curr_idxs]
                    if ci_assume_gaussian:
                        yerr = 1.96 * curr_ys_label_pred_stddev # 95% C.I. (assume Gaussian)
                    else:
                        yerr = [curr_ys_label_pred_bagging - curr_ys_label_pred_ci95_low, curr_ys_label_pred_ci95_high - curr_ys_label_pred_bagging] # 95% C.I.
                    try:
                        # also show horizontal error bars when label stddev
                        # is available
                        curr_ys_label_stddev = np.array(stats['ys_label_stddev'])[curr_idxs]
                        ax.errorbar(curr_ys_label, curr_ys_label_pred,
                                    yerr=yerr,
                                    xerr=1.96 * curr_ys_label_stddev,
                                    marker='o', linestyle='', label=curr_content_id, color=colors[idx % len(colors)])
                    except:
                        # NOTE(review): bare except used as best-effort
                        # fallback when label stddev is missing/mismatched
                        ax.errorbar(curr_ys_label, curr_ys_label_pred,
                                    yerr=yerr,
                                    marker='o', linestyle='', label=curr_content_id, color=colors[idx % len(colors)])
            ax.text(0.45, 0.1, 'Avg. Pred. Std.: {:.2f}'.format(avg_std),
                    horizontalalignment='right',
                    verticalalignment='top',
                    transform=ax.transAxes,
                    fontsize=12)
            if point_labels:
                assert len(point_labels) == len(stats['ys_label'])
                for i, point_label in enumerate(point_labels):
                    ax.annotate(point_label, (stats['ys_label'][i], stats['ys_label_pred'][i]))
        except AssertionError:
            # bootstrap info unavailable: plain regressor scatter plot
            super(BootstrapRegressorMixin, cls).plot_scatter(ax, stats, **kwargs)
class BootstrapMixin(object):
    """Mixin that turns a TrainTestModel into a bootstrap ensemble.

    ``train`` fits one model on the full data plus (num_models - 1) models on
    resampled-with-replacement copies; ``predict`` reports the first model's
    conventional prediction together with the bagging mean / stddev / 95% C.I.
    across the remaining models. Persistence stores one file per sub-model
    (``filename``, ``filename.0001``, ...).
    """
    MIXIN_VERSION = 'B0.0.1'
    DEFAULT_NUM_MODELS = 100
    def train(self, xys):
        # override TrainTestModel.train()
        xys_2d = self._preproc_train(xys)
        num_models = self._get_num_models()
        sample_size = xys_2d.shape[0]
        models = []
        # first model: use full training data
        model_0 = self._train(self.param_dict, xys_2d)
        models.append(model_0)
        # rest models: resample training data with replacement
        for i_model in range(1, num_models):
            np.random.seed(i_model) # seed is i_model, so resampling is deterministic
            # random sample with replacement:
            indices = np.random.choice(range(sample_size), size=sample_size, replace=True)
            xys_2d_ = xys_2d[indices, :]
            model_ = self._train(self.param_dict, xys_2d_)
            models.append(model_)
        self.model = models
    def _get_num_models(self):
        # ensemble size from param_dict['num_models'], else the class default
        num_models = self.param_dict[
            'num_models'] if 'num_models' in self.param_dict else self.DEFAULT_NUM_MODELS
        return num_models
    @classmethod
    def _get_num_models_from_param_dict(cls, param_dict):
        # same lookup as _get_num_models, usable before an instance exists
        num_models = param_dict[
            'num_models'] if 'num_models' in param_dict else cls.DEFAULT_NUM_MODELS
        return num_models
    def predict(self, xs):
        # override TrainTestModel.predict()
        xs_2d = self._preproc_predict(xs)
        models = self.model
        num_models = self._get_num_models()
        assert num_models == len(models)
        # first model: conventional prediction
        model_0 = models[0]
        ys_label_pred = self._predict(model_0, xs_2d)
        ys_label_pred = self.denormalize_ys(ys_label_pred)
        # rest models: bagging (bootstrap aggregation)
        ys_list = []
        for model_ in models[1:]:
            ys = self._predict(model_, xs_2d)
            ys_list.append(ys)
        ys_2d = np.vstack(ys_list)
        ys_2d = self.denormalize_ys(ys_2d)
        # per-sample statistics across the bootstrap predictions
        ys_label_pred_bagging = np.mean(ys_2d, axis=0)
        ys_label_pred_stddev = np.std(ys_2d, axis=0)
        ys_label_pred_ci95_low = np.percentile(ys_2d, 2.5, axis=0)
        ys_label_pred_ci95_high = np.percentile(ys_2d, 97.5, axis=0)
        return {'ys_label_pred': ys_label_pred,
                'ys_label_pred_bagging': ys_label_pred_bagging,
                'ys_label_pred_stddev': ys_label_pred_stddev,
                'ys_label_pred_ci95_low': ys_label_pred_ci95_low,
                'ys_label_pred_ci95_high': ys_label_pred_ci95_high,
                }
    def evaluate_stddev(self, xs):
        # summarize prediction uncertainty over the whole dataset
        prediction = self.predict(xs)
        return {'mean_stddev': np.mean(prediction['ys_label_pred_stddev']),
                'mean_ci95_low': np.mean(prediction['ys_label_pred_ci95_low']),
                'mean_ci95_high': np.mean(prediction['ys_label_pred_ci95_high'])}
    def evaluate_bagging(self, xs, ys):
        # evaluate the bagged (mean) prediction against the true labels
        ys_label_pred_bagging = self.predict(xs)['ys_label_pred_bagging']
        ys_label = ys['label']
        stats = self.get_stats(ys_label, ys_label_pred_bagging)
        return stats
    def to_file(self, filename):
        # override TrainTestModel.to_file()
        # writes one file per sub-model: filename, filename.0001, filename.0002, ...
        self._assert_trained()
        param_dict = self.param_dict
        model_dict = self.model_dict
        models = self.model
        num_models = self._get_num_models()
        assert num_models == len(models)
        for i_model, model in enumerate(models):
            filename_ = self._get_model_i_filename(filename, i_model)
            model_dict_ = model_dict.copy()
            model_dict_['model'] = model
            self._to_file(filename_, param_dict, model_dict_)
    @staticmethod
    def _get_model_i_filename(filename, i_model):
        # first model doesn't have suffix - so it has the same file name as a regular model
        if i_model == 0:
            filename_ = "{}".format(filename)
        else:
            filename_ = "{}.{:04d}".format(filename, i_model)
        return filename_
    @classmethod
    def from_file(cls, filename, logger=None, optional_dict2=None):
        # override TrainTestModel.from_file()
        # load model 0 first to discover the concrete model type and num_models
        # NOTE(review): pickle.load - only load model files from trusted sources
        filename_0 = cls._get_model_i_filename(filename, 0)
        assert os.path.exists(filename_0), 'File name {} does not exist.'.format(filename_0)
        with open(filename_0, 'rb') as file:
            info_loaded_0 = pickle.load(file)
        model_type = info_loaded_0['model_dict']['model_type']
        model_class = TrainTestModel.find_subclass(model_type)
        train_test_model_0 = model_class._from_info_loaded(
            info_loaded_0, filename_0, logger, optional_dict2)
        num_models = cls._get_num_models_from_param_dict(info_loaded_0['param_dict'])
        # then load every sub-model (including model 0 again) into the list
        models = []
        for i_model in range(num_models):
            filename_ = cls._get_model_i_filename(filename, i_model)
            assert os.path.exists(filename_), 'File name {} does not exist.'.format(filename_)
            with open(filename_, 'rb') as file:
                info_loaded_ = pickle.load(file)
            train_test_model_ = model_class._from_info_loaded(info_loaded_, filename_, None, None)
            model_ = train_test_model_.model
            models.append(model_)
        train_test_model_0.model = models
        return train_test_model_0
    @classmethod
    def delete(cls, filename):
        # override TrainTestModel.delete()
        # delete every per-sub-model file written by to_file
        filename_0 = cls._get_model_i_filename(filename, 0)
        assert os.path.exists(filename_0)
        with open(filename_0, 'rb') as file:
            info_loaded_0 = pickle.load(file)
        num_models = cls._get_num_models_from_param_dict(info_loaded_0['param_dict'])
        for i_model in range(num_models):
            filename_ = cls._get_model_i_filename(filename, i_model)
            cls._delete(filename_)
class BootstrapLibsvmNusvrTrainTestModel(BootstrapRegressorMixin, BootstrapMixin, LibsvmNusvrTrainTestModel):
    """libsvm nu-SVR model trained as a bootstrap (bagging) ensemble."""
    TYPE = 'BOOTSTRAP_LIBSVMNUSVR'
    VERSION = LibsvmNusvrTrainTestModel.VERSION + '-' + BootstrapMixin.MIXIN_VERSION
class BootstrapSklearnRandomForestTrainTestModel(BootstrapRegressorMixin, BootstrapMixin, SklearnRandomForestTrainTestModel):
    """scikit-learn random forest model trained as a bootstrap (bagging) ensemble."""
    TYPE = 'BOOTSTRAP_RANDOMFOREST'
    VERSION = SklearnRandomForestTrainTestModel.VERSION + '-' + BootstrapMixin.MIXIN_VERSION
class ResidueBootstrapMixin(BootstrapMixin):
    """Bootstrap variant that resamples prediction residues instead of the
    training rows themselves (residual bootstrap).

    Model 0 is trained on the full data; its residues (label - prediction)
    are resampled with replacement and added back onto the model-0
    predictions to synthesize the labels used to train each subsequent model.
    """
    MIXIN_VERSION = 'RB0.0.1'

    def train(self, xys):
        # override TrainTestModel.train()
        xys_2d = self._preproc_train(xys)
        num_models = self._get_num_models()
        sample_size = xys_2d.shape[0]
        models = []
        # first model: use full training data
        model_0 = self._train(self.param_dict, xys_2d)
        models.append(model_0)
        # predict with model 0 and compute per-sample residues;
        # labels live in column 0, features in the remaining columns
        ys = xys_2d[:, 0]
        xs_2d = xys_2d[:, 1:]
        ys_pred = self._predict(model_0, xs_2d)
        residue_ys = ys - ys_pred
        # rest models: resample residues with replacement, rebuild labels
        for i_model in range(1, num_models):
            np.random.seed(i_model)  # seed is i_model, so resampling is deterministic
            # random sample with replacement (idxs avoids shadowing the
            # module-level indices() helper):
            idxs = np.random.choice(range(sample_size), size=sample_size, replace=True)
            residue_ys_resampled = residue_ys[idxs]
            ys_resampled = residue_ys_resampled + ys_pred
            # column-stack the synthesized labels with the original features;
            # np.matrix is deprecated in NumPy, so use a reshaped column instead
            xys_2d_ = np.hstack((np.asarray(ys_resampled).reshape(-1, 1), xs_2d))
            model_ = self._train(self.param_dict, xys_2d_)
            models.append(model_)
        self.model = models
class ResidueBootstrapLibsvmNusvrTrainTestModel(BootstrapRegressorMixin, ResidueBootstrapMixin, LibsvmNusvrTrainTestModel):
    """libsvm nu-SVR model trained with the residual-bootstrap scheme."""
    TYPE = 'RESIDUEBOOTSTRAP_LIBSVMNUSVR'
    VERSION = LibsvmNusvrTrainTestModel.VERSION + '-' + ResidueBootstrapMixin.MIXIN_VERSION
class ResidueBootstrapRandomForestTrainTestModel(BootstrapRegressorMixin, ResidueBootstrapMixin, SklearnRandomForestTrainTestModel):
    """scikit-learn random forest model trained with the residual-bootstrap scheme."""
    TYPE = 'RESIDUEBOOTSTRAP_RANDOMFOREST'
    VERSION = SklearnRandomForestTrainTestModel.VERSION + '-' + ResidueBootstrapMixin.MIXIN_VERSION
79549b115b3636fad32e7445ece75ff4c4a423dc | 18,146 | py | Python | ansys/mapdl/core/_commands/preproc/material_data_tables.py | da1910/pymapdl | 305b70b30e61a78011e974ff4cb409ee21f89e13 | [
"MIT"
] | null | null | null | ansys/mapdl/core/_commands/preproc/material_data_tables.py | da1910/pymapdl | 305b70b30e61a78011e974ff4cb409ee21f89e13 | [
"MIT"
] | null | null | null | ansys/mapdl/core/_commands/preproc/material_data_tables.py | da1910/pymapdl | 305b70b30e61a78011e974ff4cb409ee21f89e13 | [
"MIT"
] | null | null | null | """
These PREP7 commands create and modify the material data tables (that
is, to specify and define material models).
"""
def tb(self, lab="", mat="", ntemp="", npts="", tbopt="", eosopt="",
       funcname="", **kwargs):
    """APDL Command: TB

    Activates a material data table for material properties or special
    element input.

    Parameters
    ----------
    lab
        Material model data table type (e.g. BISO, BKIN, CHABOCHE, CREEP,
        ELASTIC, HILL, HYPER, GURSON, CZM, ...); see the ANSYS TB command
        reference for the full list of labels and their meanings.
    mat
        Material reference number.
    ntemp
        Number of temperatures for which data will be provided.
    npts
        Number of data points (or sets) per temperature; interpretation
        depends on Lab.
    tbopt
        Material model option for the chosen Lab.
    eosopt
        Equation-of-state option (explicit dynamics only).
    funcname
        Name of a previously defined function (function-based models only).
    """
    fields = (lab, mat, ntemp, npts, tbopt, eosopt, funcname)
    command = "TB," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
def tbcopy(self, lab="", matf="", matt="", **kwargs):
    """APDL Command: TBCOPY

    Copies a material data table from one material to another.

    Parameters
    ----------
    lab
        Data table label (see the TB command). Lab = ALL copies all
        nonlinear data defined via TB; combine with MPCOPY to also copy
        linear constants.
    matf
        Material reference number to copy the table from.
    matt
        Material reference number to copy the table to.

    Notes
    -----
    This command is also valid in SOLUTION.
    """
    command = f"TBCOPY,{lab!s},{matf!s},{matt!s}"
    return self.run(command, **kwargs)
def tbdata(self, stloc="", c1="", c2="", c3="", c4="", c5="", c6="",
           **kwargs):
    """APDL Command: TBDATA

    Defines data for the material data table activated by the last TB
    command, at the temperature set by the last TBTEMP command (if any).

    Parameters
    ----------
    stloc
        Starting location in the table for the data that follows; defaults
        to the last filled location + 1 (reset to 1 by each TB or TBTEMP).
    c1, c2, c3, c4, c5, c6
        Data values assigned to the six consecutive locations starting at
        STLOC. Existing values are overwritten; a blank leaves the existing
        value unchanged.

    Notes
    -----
    Values are linearly interpolated between user-defined TBTEMP
    temperatures. This command is also valid in SOLUTION.
    """
    constants = (stloc, c1, c2, c3, c4, c5, c6)
    command = "TBDATA," + ",".join(str(constant) for constant in constants)
    return self.run(command, **kwargs)
def tbdele(self, lab="", mat1="", mat2="", inc="", **kwargs):
    """APDL Command: TBDELE

    Deletes previously defined material data tables.

    Parameters
    ----------
    lab
        Data table label (see the TB command); ALL deletes all data tables.
    mat1, mat2, inc
        Delete tables for materials MAT1 through MAT2 (defaults to MAT1) in
        steps of INC (defaults to 1). MAT1 = ALL deletes tables for every
        material (MAT2 and INC are ignored).

    Notes
    -----
    This command is also valid in SOLUTION.
    """
    args = (lab, mat1, mat2, inc)
    command = "TBDELE," + ",".join(str(arg) for arg in args)
    return self.run(command, **kwargs)
def tbeo(self, par="", value="", **kwargs):
    """APDL Command: TBEO

    Sets special options or parameters for material data tables.

    Parameters
    ----------
    par
        Parameter name; CAPCREEPREG (with TB,CREEP) selects which creep
        model applies when combined with the Extended Drucker-Prager model.
    value
        Parameter value; for Par = CAPCREEPREG either SHEA (shear
        stress-state creep model) or COMP (compaction stress-state creep
        model).

    Notes
    -----
    Issue TBEO after activating the table (TB) but before defining its data
    (TBDATA) or curve points (TBPT).
    """
    command = f"TBEO,{par!s},{value!s}"
    return self.run(command, **kwargs)
def tbfield(self, type_="", value="", **kwargs):
    """APDL Command: TBFIELD

    Defines the value of a field variable for field-dependent material
    data tables.

    Parameters
    ----------
    type_
        Type of field variable: FREQ (frequency), TEMP (temperature), TIME,
        NPRES (normal pressure), SLDA / SLDI (total sliding distance,
        algebraic / absolute), SLRV (sliding velocity), CYCLE (healing
        cycle number), or UF01..UF09 (user-defined field variables).
    value
        The field value to be referenced. Issue the command repeatedly to
        set values for several field variables.

    Notes
    -----
    Define field values in ascending order; a value held constant need only
    be defined once. The specified value remains active until the next
    TBFIELD command. After setting the field value(s), define the table
    data with TBDATA. For contact elements, TEMP refers to the average
    contact-surface temperature and TIME to the analysis time (TIME
    command); SLDA/SLDI correspond to the contact elements' reported total
    sliding distances. With TB,FRIC, field variables apply only to
    isotropic (TBOPT = ISO) and orthotropic (TBOPT = ORTHO) friction, not
    user-defined friction. See "Understanding Field Variables" in the
    Material Reference for the interpolation scheme.
    """
    command = f"TBFIELD,{type_!s},{value!s}"
    return self.run(command, **kwargs)
def tbin(self, oper="", par1="", par2="", par3="", par4="", **kwargs):
    """APDL Command: TBIN

    Sets parameters used for interpolation of the material data tables.

    Parameters
    ----------
    oper
        Operation to perform (SCALE).
    par1
        Independent variable: any field variable set via TBFIELD.
    par2
        Index of a material parameter defined via TBDATA.
    par3
        Scale for the independent variable: LINEAR or LOG.
    par4
        Scale for the dependent variable (Par2): LINEAR or LOG.

    Notes
    -----
    See "Logarithmic Interpolation and Scaling" in the Material Reference
    for the supported data tables.
    """
    args = (oper, par1, par2, par3, par4)
    command = "TBIN," + ",".join(str(arg) for arg in args)
    return self.run(command, **kwargs)
def tblist(self, lab="", mat="", **kwargs):
    """APDL Command: TBLIST

    Lists the material data tables.

    Parameters
    ----------
    lab
        Data table label (see the TB command); defaults to the active
        table, or ALL to list every label.
    mat
        Material number to list (defaults to the active material), or ALL
        for every material.

    Notes
    -----
    This is a utility command, valid anywhere.
    """
    command = f"TBLIST,{lab!s},{mat!s}"
    return self.run(command, **kwargs)
def tbmodif(self, row="", col="", value="", **kwargs):
    """APDL Command: TBMODIF

    Modifies a single entry of the material data table activated by the
    last TB command (a GUI-generated command).

    Parameters
    ----------
    row, col
        Row and column numbers of the table entry to modify.
    value
        The new value for that (ROW, COL) location.

    Notes
    -----
    For temperature-dependent data, the temperature from the last TBTEMP
    command applies. TBMODIF is normally emitted by the GUI's spreadsheet
    editor rather than typed directly, though it may appear in batch input.
    This command is also valid in SOLUTION.
    """
    command = f"TBMODIF,{row!s},{col!s},{value!s}"
    return self.run(command, **kwargs)
def tbplot(self, lab="", mat="", tbopt="", temp="", segn="", **kwargs):
    """APDL Command: TBPLOT

    Displays the material data table.

    Parameters
    ----------
    lab
        Data table label: MKIN, KINH, MELAS, MISO, BKIN, BISO, BH, GASKET,
        or JOIN (defaults to the active table). For B-H data also NB, MH,
        SBH, SNB, SMH for derived curves/slopes.
    mat
        Material number to display (defaults to the active material).
    tbopt
        Gasket/joint plotting option: ALL, COMP (compression only), LUNL
        (linear unloading with compression curve), or NUNL (nonlinear
        unloading only).
    temp
        Temperature at which to plot gasket or joint data (Lab = GASKET or
        JOIN only); TEMP = ALL plots all temperatures.
    segn
        Whether to add segment numbers to a gasket curve (Lab = GASKET
        only): NO (default) or YES (ignored beyond 20 data points).

    Notes
    -----
    Only stress-strain, B-H, gasket, or joint-element nonlinear material
    curves can be displayed. TBOPT and TEMP apply only when Lab = GASKET or
    JOIN; SEGN only when Lab = GASKET. Valid in any processor.
    """
    args = (lab, mat, tbopt, temp, segn)
    command = "TBPLOT," + ",".join(str(arg) for arg in args)
    return self.run(command, **kwargs)
def tbpt(self, oper="", x1="", x2="", x3="", xn="", **kwargs):
    """APDL Command: TBPT

    Defines a point on a nonlinear data curve (stress-strain, B-H, ...) at
    the temperature set by the last TBTEMP command.

    Parameters
    ----------
    oper
        Operation: DEFI (default) inserts/replaces a point, keeping the
        table sorted by ascending X1; DELE deletes the point whose X1
        matches (remaining components ignored).
    x1, x2, x3, xn
        Components of the point; the number used depends on the data table
        type (all tables except TB,EXPE use only two components).

    Notes
    -----
    This command is also valid in SOLUTION.
    """
    args = (oper, x1, x2, x3, xn)
    command = "TBPT," + ",".join(str(arg) for arg in args)
    return self.run(command, **kwargs)
def tbtemp(self, temp="", kmod="", **kwargs):
    """APDL Command: TBTEMP

    Defines the temperature associated with subsequent TBPT or TBDATA
    input for the active material data table.

    Parameters
    ----------
    temp
        Temperature value (defaults to 0.0 if KMOD is blank).
    kmod
        If blank, TEMP defines a new temperature (use TBLIST to list
        temperatures and data).

    Notes
    -----
    The defined temperature stays active until the next TBTEMP command;
    temperatures must be supplied in ascending order. This command is also
    valid in SOLUTION.
    """
    command = f"TBTEMP,{temp!s},{kmod!s}"
    return self.run(command, **kwargs)
| 32.992727 | 97 | 0.656949 |
79549b2214afbb737512abccdfdbf256ee12c1d6 | 3,828 | py | Python | PyDSS/modes/Dynamic.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | PyDSS/modes/Dynamic.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | PyDSS/modes/Dynamic.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime, timedelta
from PyDSS.modes.abstract_solver import abstact_solver
import math
class Dynamic(abstact_solver):
    """OpenDSS 'Dynamic' mode solver wrapper.

    Derives the simulation window from the Project settings (start/end day,
    minutes, step resolution in seconds) and drives the OpenDSS Solution
    interface step by step.
    """
    def __init__(self, dssInstance, SimulationSettings, Logger):
        # dssInstance: OpenDSS COM/DLL wrapper; SimulationSettings: nested
        # settings dict with a 'Project' section; Logger: logging.Logger-like
        super().__init__(dssInstance, SimulationSettings, Logger)
        # NOTE(review): print() instead of Logger here - consider Logger.info
        print('Running Dynamic simulation')
        self.Settings = SimulationSettings
        self.pyLogger = Logger
        StartDay = SimulationSettings['Project']['Start Day']
        StartTimeMin = SimulationSettings['Project']['Start Time (min)']
        EndTimeMin = SimulationSettings['Project']['End Time (min)']
        sStepResolution = SimulationSettings['Project']['Step resolution (sec)']
        # build the start datetime from year + (day-of-year + offset), '%Y %j'
        self._Time = datetime.strptime('{} {}'.format(SimulationSettings['Project']['Start Year'],
                                                      SimulationSettings['Project']['Start Day'] + SimulationSettings['Project'][
                                                          'Date offset']
                                                      ), '%Y %j')
        self._Time = self._Time + timedelta(minutes=StartTimeMin)
        self._StartTime = self._Time
        # end datetime built the same way from 'End Day' + 'End Time (min)'
        self._EndTime = datetime.strptime('{} {}'.format(SimulationSettings['Project']['Start Year'],
                                                         SimulationSettings['Project']['End Day'] + SimulationSettings['Project'][
                                                             'Date offset']
                                                         ), '%Y %j')
        self._EndTime = self._EndTime + timedelta(minutes=EndTimeMin)
        self._sStepRes = sStepResolution
        self._dssIntance = dssInstance
        self._dssSolution = dssInstance.Solution
        # switch OpenDSS to Dynamic mode, then position the solution clock
        self.setMode('Dynamic')
        self._dssSolution.Hour(StartDay * 24)
        self._dssSolution.Seconds(StartTimeMin * 60)
        self._dssSolution.Number(1)
        self._dssSolution.StepSize(self._sStepRes)
        self._dssSolution.MaxControlIterations(SimulationSettings['Project']['Max Control Iterations'])
        return
    def setFrequency(self, frequency):
        # set the OpenDSS solution base frequency [Hz]
        self._dssSolution.Frequency(frequency)
        return
    def getFrequency(self):
        # current OpenDSS solution frequency [Hz]
        return self._dssSolution.Frequency()
    def SimulationSteps(self):
        # total number of steps covering [start, end] at the step resolution
        Seconds = (self._EndTime - self._StartTime).total_seconds()
        Steps = math.ceil(Seconds / self._sStepRes)
        return Steps, self._StartTime, self._EndTime
    def SolveFor(self, mStartTime, mTimeStep):
        # solve mTimeStep steps starting at mStartTime (minutes from day start)
        Hour = int(mStartTime/60)
        Min = mStartTime % 60
        self._dssSolution.Hour(Hour)
        self._dssSolution.Seconds(Min*60)
        self._dssSolution.Number(mTimeStep)
        self._dssSolution.Solve()
        return
    def IncStep(self):
        # advance one step: solve, then move the PyDSS clock forward and
        # read OpenDSS's own clock back for debug logging
        self._dssSolution.StepSize(self._sStepRes)
        self._dssSolution.Solve()
        self._Time = self._Time + timedelta(seconds=self._sStepRes)
        self._Hour = int(self._dssSolution.DblHour() // 1)
        self._Second = (self._dssSolution.DblHour() % 1) * 60 * 60
        self.pyLogger.debug('OpenDSS time [h] - ' + str(self._dssSolution.DblHour()))
        self.pyLogger.debug('PyDSS datetime - ' + str(self._Time))
    def GetTotalSeconds(self):
        # elapsed simulated seconds since the start time
        return (self._Time - self._StartTime).total_seconds()
    def GetDateTime(self):
        # current simulated datetime
        return self._Time
    def GetStepResolutionSeconds(self):
        return self._sStepRes
    def GetStepSizeSec(self):
        return self._sStepRes
    def reSolve(self):
        # re-solve the current time point without advancing or running controls
        self._dssSolution.StepSize(0)
        self._dssSolution.SolveNoControl()
    def Solve(self):
        # solve the current time point (with controls) without advancing
        self._dssSolution.StepSize(0)
        self._dssSolution.Solve()
    def getMode(self):
        # current OpenDSS solution mode name
        return self._dssSolution.ModeID()
    def setMode(self, mode):
        # switch OpenDSS solution mode via a raw command string
        self._dssIntance.utils.run_command('Set Mode={}'.format(mode))
| 39.875 | 130 | 0.612591 |
79549bdde8eecfd096a97bf68c12f03385c910ad | 2,727 | py | Python | api/models/users.py | ClosedSesame/api | 275c36ff1cd2328a42f789fb6d6e9fa57bd53e0f | [
"MIT"
] | null | null | null | api/models/users.py | ClosedSesame/api | 275c36ff1cd2328a42f789fb6d6e9fa57bd53e0f | [
"MIT"
] | null | null | null | api/models/users.py | ClosedSesame/api | 275c36ff1cd2328a42f789fb6d6e9fa57bd53e0f | [
"MIT"
] | null | null | null | from sqlalchemy.orm import relationship
from sqlalchemy.exc import DBAPIError
from .associations import accounts_association
from datetime import datetime as dt
from .meta import Base
from cryptacular import bcrypt
from .roles import AccountRole
from sqlalchemy import (
Column,
String,
Integer,
Index,
Text,
DateTime,
ForeignKey,
)
# from .user_accounts import UserAccounts
# from .associations import accounts_association
manager = bcrypt.BCRYPTPasswordManager()
class Users(Base):
    """Application user account.

    Passwords are hashed with bcrypt on construction; ``check_credentials``
    verifies a plaintext password against the stored hash.
    """
    __tablename__ = 'users'
    # TODO: Assess need for a 'user_name'?
    id = Column(Integer, primary_key=True)
    email = Column(String, nullable=False, unique=True)
    password = Column(String, nullable=False)  # bcrypt hash, never plaintext
    roles = relationship(AccountRole, secondary=accounts_association, back_populates='users')
    accounts = relationship('UserAccounts', backref='users')  # TODO: Reference the table in .managed/associated/schema
    # Pass the callable (dt.now), not its result: dt.now() would freeze the
    # timestamp at class-definition (import) time for every inserted row.
    date_created = Column(DateTime, default=dt.now)
    date_updated = Column(DateTime, default=dt.now, onupdate=dt.now)
    # TODO: decide how this model relates to the account/location tables
    # (previous drafts carried an account_id FK here).
    def __init__(self, email, password):
        self.email = email
        # NOTE: Update the password management
        self.password = manager.encode(password, 10)
    @classmethod
    def new(cls, request, email=None, password=None):
        """Create and persist a new user; return the stored row (or None)."""
        if request is None:
            # NOTE(review): raising the DBAPIError class matches prior
            # behavior, but DBAPIError normally expects (statement, params,
            # orig) - confirm whether a domain-specific error fits better.
            raise DBAPIError
        user = cls(email, password)
        request.dbsession.add(user)
        request.dbsession.flush()
        return request.dbsession.query(cls).filter(
            cls.email == email).one_or_none()
    @classmethod
    def one(cls, request, email=None):
        """Return the user with the given email, or None."""
        return request.dbsession.query(cls).filter(
            cls.email == email).one_or_none()
    @classmethod
    def check_credentials(cls, request=None, email=None, password=None):
        """Return the user row when email/password match, else None."""
        if request.dbsession is None:
            raise DBAPIError
        try:
            query = request.dbsession.query(cls).filter(
                cls.email == email).one_or_none()
        except DBAPIError:
            # bare raise preserves the original traceback
            raise
        if query is not None:
            if manager.check(query.password, password):
                return query
        return None
| 30.988636 | 119 | 0.6685 |
79549cd898d5b701d38a1a306572215b68a639a7 | 224 | py | Python | doc/source/sphinxext/sphinx_gallery/__init__.py | dettmann/bolero | fa88be1a1d4ab1e2855d20f5429ac83ed5eb4925 | [
"BSD-3-Clause"
] | 51 | 2017-05-19T13:33:29.000Z | 2022-01-21T10:59:57.000Z | doc/source/sphinxext/sphinx_gallery/__init__.py | dettmann/bolero | fa88be1a1d4ab1e2855d20f5429ac83ed5eb4925 | [
"BSD-3-Clause"
] | 94 | 2017-05-19T19:44:07.000Z | 2021-12-15T13:40:59.000Z | doc/source/sphinxext/sphinx_gallery/__init__.py | dettmann/bolero | fa88be1a1d4ab1e2855d20f5429ac83ed5eb4925 | [
"BSD-3-Clause"
] | 31 | 2017-05-19T19:41:39.000Z | 2021-08-25T14:14:19.000Z | """
Sphinx Gallery
==============
"""
import os
__version__ = '0.1.13'
def glr_path_static():
    """Return the absolute path to this package's bundled ``_static`` files."""
    package_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(package_dir, '_static')
79549d84e3936d1773c6fdbf6e9ad64c5977bcac | 1,710 | py | Python | tools/c7n_kube/setup.py | tjstansell/cloud-custodian | 830a1131537560e5032bc67e52b66e259630e899 | [
"Apache-2.0"
] | 1 | 2021-08-22T12:30:03.000Z | 2021-08-22T12:30:03.000Z | tools/c7n_kube/setup.py | tjstansell/cloud-custodian | 830a1131537560e5032bc67e52b66e259630e899 | [
"Apache-2.0"
] | 8 | 2021-06-02T04:42:28.000Z | 2022-03-10T19:25:52.000Z | tools/c7n_kube/setup.py | tjstansell/cloud-custodian | 830a1131537560e5032bc67e52b66e259630e899 | [
"Apache-2.0"
] | null | null | null | # Automatically generated from poetry/pyproject.toml
# flake8: noqa
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['c7n_kube',
'c7n_kube.actions',
'c7n_kube.resources',
'c7n_kube.resources.apps',
'c7n_kube.resources.core']
package_data = \
{'': ['*']}
install_requires = \
['argcomplete (>=1.12.3,<2.0.0)',
'attrs (>=21.2.0,<22.0.0)',
'boto3 (>=1.17.102,<2.0.0)',
'botocore (>=1.20.102,<2.0.0)',
'c7n (>=0.9.13,<0.10.0)',
'importlib-metadata (>=4.6.0,<5.0.0)',
'jmespath (>=0.10.0,<0.11.0)',
'jsonschema (>=3.2.0,<4.0.0)',
'kubernetes>=10.0.1,<11.0.0',
'pyrsistent (>=0.18.0,<0.19.0)',
'python-dateutil (>=2.8.1,<3.0.0)',
'pyyaml (>=5.4.1,<6.0.0)',
's3transfer (>=0.4.2,<0.5.0)',
'six (>=1.16.0,<2.0.0)',
'tabulate (>=0.8.9,<0.9.0)',
'typing-extensions (>=3.10.0.0,<4.0.0.0)',
'urllib3 (>=1.26.6,<2.0.0)',
'zipp (>=3.4.1,<4.0.0)']
setup_kwargs = {
'name': 'c7n-kube',
'version': '0.2.12',
'description': 'Cloud Custodian - Kubernetes Provider',
'license': 'Apache-2.0',
'classifiers': [
'License :: OSI Approved :: Apache Software License',
'Topic :: System :: Systems Administration',
'Topic :: System :: Distributed Computing'
],
'long_description': '# Custodian Kubernetes Support\n\n\nWork in Progress - Not Ready For Use.\n\n',
'long_description_content_type': 'text/markdown',
'author': 'Cloud Custodian Project',
'author_email': None,
'maintainer': None,
'maintainer_email': None,
'url': 'https://cloudcustodian.io',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
| 28.032787 | 104 | 0.598246 |
79549f79ea83fd1ce4dbc0f37c28b3bb1383cbe0 | 395 | py | Python | app/email.py | Daniel-Muruthi/pitch | df8f7350bcc43fe4cd90c0d819d230b2044ec8af | [
"MIT"
] | null | null | null | app/email.py | Daniel-Muruthi/pitch | df8f7350bcc43fe4cd90c0d819d230b2044ec8af | [
"MIT"
] | null | null | null | app/email.py | Daniel-Muruthi/pitch | df8f7350bcc43fe4cd90c0d819d230b2044ec8af | [
"MIT"
] | null | null | null | from flask_mail import Message
from flask import render_template
from . import mail
def mail_message(subject, template, to, **kwargs):
sender_email = 'adinomuruthi1@gmail.com'
email = Message(subject, sender=sender_email, receipients=[to])
email.body = render_template(template + ".txt", **kwargs)
email.html = render_template(template + ".html", **kwargs)
mail.send(email) | 35.909091 | 67 | 0.729114 |
7954a03d73aaf94146e3e4b4c1324201db07d2f9 | 1,355 | py | Python | plot.py | shivapbhusal/calculate_bandgap | 7f738a35145e6c9c64100d30bf8ae477a627bbba | [
"MIT"
] | null | null | null | plot.py | shivapbhusal/calculate_bandgap | 7f738a35145e6c9c64100d30bf8ae477a627bbba | [
"MIT"
] | null | null | null | plot.py | shivapbhusal/calculate_bandgap | 7f738a35145e6c9c64100d30bf8ae477a627bbba | [
"MIT"
] | null | null | null | '''
A python script to compute bandgap in zinc oxide
Requires 1 argument in command line.
Example, python compute_bandgap doscar
'''
import matplotlib.pyplot as plt
import sys
import numpy as np
'''
Function to convert numbers in E+ and E- exponential format to
normal floating point numbers.
'''
def stringToFloat(myStr):
    """Parse a number in plain or scientific notation (e.g. '1.2E+03').

    Python's float() natively understands 'E+'/'E-' exponents, so delegate
    to it directly. The manual split-and-pow approach this replaces produced
    the same values for well-formed input but lost precision through the
    extra multiplication by pow(10, ...).
    """
    return float(myStr)
# Read the DOSCAR-style file named on the command line and collect every
# (energy, DOS) pair whose energy (column 1) lies within [-3, 3] eV, then
# scatter-plot them. (The original comment block described zero-tracking
# logic that was never implemented; it has been removed as misleading.)
x_values = []
y_values = []
with open(sys.argv[1]) as doscar_file:  # 'with' guarantees the file is closed
    for line in doscar_file:
        # split() handles runs of whitespace; the original split(' ') broke
        # on multi-space-aligned columns and blank lines.
        fields = line.split()
        if not fields:
            continue
        energy = stringToFloat(fields[0])
        if -3 <= energy <= 3:
            x_values.append(energy)
            y_values.append(stringToFloat(fields[1]))

print(x_values)
print(y_values)

plt.plot(x_values, y_values, 'ro')
plt.axis([-3, 3, 0, 400])
plt.show()
| 25.566038 | 101 | 0.697417 |
7954a0b0cfd197e85cbba63263f3dc9ac9604572 | 106,832 | py | Python | vectorbt/portfolio/nb.py | jacopomaroli/vectorbt | b63def506735e06262ec7f95bf7622cadd4c9e2e | [
"Apache-2.0"
] | 1 | 2021-03-28T23:59:08.000Z | 2021-03-28T23:59:08.000Z | vectorbt/portfolio/nb.py | dougransom/vectorbt | 44968ac579a1420f713df326eb730bae93041622 | [
"Apache-2.0"
] | null | null | null | vectorbt/portfolio/nb.py | dougransom/vectorbt | 44968ac579a1420f713df326eb730bae93041622 | [
"Apache-2.0"
] | null | null | null | """Numba-compiled functions.
Provides an arsenal of Numba-compiled functions that are used for portfolio
modeling, such as generating and filling orders. These only accept NumPy arrays and
other Numba-compatible types.
!!! note
vectorbt treats matrices as first-class citizens and expects input arrays to be
2-dim, unless function has suffix `_1d` or is meant to be input to another function.
All functions passed as argument should be Numba-compiled.
Records should retain the order they were created in.
!!! warning
Accumulation of roundoff error possible.
See [here](https://en.wikipedia.org/wiki/Round-off_error#Accumulation_of_roundoff_error) for explanation.
Rounding errors can cause trades and positions to not close properly.
Example:
>>> print('%.50f' % 0.1) # has positive error
0.10000000000000000555111512312578270211815834045410
>>> # many buy transactions with positive error -> cannot close position
>>> sum([0.1 for _ in range(1000000)]) - 100000
1.3328826753422618e-06
>>> print('%.50f' % 0.3) # has negative error
0.29999999999999998889776975374843459576368331909180
>>> # many sell transactions with negative error -> cannot close position
>>> 300000 - sum([0.3 for _ in range(1000000)])
5.657668225467205e-06
While vectorbt has implemented tolerance checks when comparing floats for equality,
adding/subtracting small amounts large number of times may still introduce a noticable
error that cannot be corrected post factum.
To mitigate this issue, avoid repeating lots of micro-transactions of the same sign.
For example, reduce by `np.inf` or `shares_now` to close a long/short position.
See `vectorbt.utils.math` for current tolerance values.
"""
import numpy as np
from numba import njit
from vectorbt.utils.math import (
is_close_nb,
is_close_or_less_nb,
is_less_nb,
add_nb
)
from vectorbt.utils.array import insert_argsort_nb
from vectorbt.base.reshape_fns import flex_select_auto_nb
from vectorbt.generic import nb as generic_nb
from vectorbt.portfolio.enums import (
SimulationContext,
GroupContext,
RowContext,
SegmentContext,
OrderContext,
CallSeqType,
SizeType,
ConflictMode,
Order,
NoOrder,
OrderStatus,
OrderSide,
StatusInfo,
OrderResult,
RejectedOrderError,
Direction,
order_dt,
TradeDirection,
TradeStatus,
trade_dt,
position_dt,
log_dt
)
# ############# Simulation ############# #
@njit(cache=True)
def fill_req_log_nb(cash_now, shares_now, val_price_now, value_now, order, log_record):
    """Fill log record on order request.

    Copies the pre-order account state (cash, shares, valuation price, value)
    and every field of the requested `order` into `log_record`, so the request
    can be inspected later regardless of whether the order gets filled.
    """
    log_record['cash_now'] = cash_now
    log_record['shares_now'] = shares_now
    log_record['val_price_now'] = val_price_now
    log_record['value_now'] = value_now
    log_record['size'] = order.size
    log_record['size_type'] = order.size_type
    log_record['direction'] = order.direction
    log_record['price'] = order.price
    log_record['fees'] = order.fees
    log_record['fixed_fees'] = order.fixed_fees
    log_record['slippage'] = order.slippage
    log_record['min_size'] = order.min_size
    log_record['max_size'] = order.max_size
    log_record['reject_prob'] = order.reject_prob
    log_record['allow_partial'] = order.allow_partial
    log_record['raise_reject'] = order.raise_reject
    log_record['log'] = order.log
@njit(cache=True)
def fill_res_log_nb(new_cash, new_shares, order_result, log_record):
    """Fill log record on order result.

    Records the post-order balances and the fields of `order_result`
    (size, price, fees, side, status, status info) into `log_record`.
    """
    log_record['new_cash'] = new_cash
    log_record['new_shares'] = new_shares
    log_record['res_size'] = order_result.size
    log_record['res_price'] = order_result.price
    log_record['res_fees'] = order_result.fees
    log_record['res_side'] = order_result.side
    log_record['res_status'] = order_result.status
    log_record['res_status_info'] = order_result.status_info
@njit(cache=True)
def order_not_filled_nb(cash_now, shares_now, status, status_info, log_record, log):
    """Return unchanged `cash_now` and `shares_now` together with an empty
    `OrderResult` carrying the given status for an order that wasn't filled."""
    res = OrderResult(np.nan, np.nan, np.nan, -1, status, status_info)
    if log:
        fill_res_log_nb(cash_now, shares_now, res, log_record)
    return cash_now, shares_now, res
@njit(cache=True)
def buy_shares_nb(cash_now, shares_now, size, direction, price, fees, fixed_fees, slippage,
                  min_size, allow_partial, raise_reject, log_record, log):
    """Buy shares.

    Fills as much of `size` as `cash_now` allows (after slippage and fees),
    then checks the filled size against `min_size` and `allow_partial`.
    Returns the new cash balance, new share balance, and an `OrderResult`.
    """
    # Get optimal order size
    if direction == Direction.ShortOnly:
        # In short-only mode a buy can only cover the open short position.
        adj_size = min(-shares_now, size)
    else:
        adj_size = size

    # Get price adjusted with slippage
    adj_price = price * (1 + slippage)

    # Get cash required to complete this order
    req_cash = adj_size * adj_price
    req_fees = req_cash * fees + fixed_fees
    adj_req_cash = req_cash + req_fees

    if is_close_or_less_nb(adj_req_cash, cash_now):
        # Sufficient cash
        final_size = adj_size
        fees_paid = req_fees
        final_cash = adj_req_cash

        # Update current cash
        new_cash = add_nb(cash_now, -final_cash)
    else:
        # Insufficient cash, size will be less than requested
        if is_close_or_less_nb(cash_now, fixed_fees):
            # Can't fill
            if raise_reject:
                raise RejectedOrderError("Order rejected: Not enough cash to cover fees")
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Rejected, StatusInfo.CantCoverFees,
                log_record, log)

        # For fees of 10% and 1$ per transaction, you can buy shares for 90$ (effect_cash)
        # to spend 100$ (adj_req_cash) in total
        final_cash = (cash_now - fixed_fees) / (1 + fees)

        # Update size and fees
        final_size = final_cash / adj_price
        fees_paid = cash_now - final_cash

        # Update current cash
        new_cash = 0.  # numerical stability

    # Check against minimum size
    if abs(final_size) < min_size:
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than minimum allowed")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.MinSizeNotReached,
            log_record, log)

    # Check against partial fill (np.inf doesn't count)
    if np.isfinite(size) and is_less_nb(final_size, size) and not allow_partial:
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than requested")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.PartialFill,
            log_record, log)

    # Update current shares
    new_shares = add_nb(shares_now, final_size)

    # Return filled order
    order_result = OrderResult(
        final_size,
        adj_price,
        fees_paid,
        OrderSide.Buy,
        OrderStatus.Filled,
        -1
    )
    if log:
        fill_res_log_nb(new_cash, new_shares, order_result, log_record)
    return new_cash, new_shares, order_result
@njit(cache=True)
def sell_shares_nb(cash_now, shares_now, size, direction, price, fees, fixed_fees, slippage,
                   min_size, allow_partial, raise_reject, log_record, log):
    """Sell shares.

    Sells up to `size` shares (capped by holdings in long-only mode), applies
    slippage and fees, and returns the new cash balance, new share balance,
    and an `OrderResult`. Note `size` here is a positive magnitude.
    """
    # Get optimal order size
    if direction == Direction.LongOnly:
        # Long-only can sell at most the current holdings.
        final_size = min(shares_now, size)
    else:
        final_size = size

    # Check against minimum size
    if abs(final_size) < min_size:
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than minimum allowed")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.MinSizeNotReached,
            log_record, log)

    # Check against partial fill
    if np.isfinite(size) and is_less_nb(final_size, size) and not allow_partial:
        # np.inf doesn't count
        if raise_reject:
            raise RejectedOrderError("Order rejected: Final size is less than requested")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.PartialFill,
            log_record, log)

    # Get price adjusted with slippage
    adj_price = price * (1 - slippage)

    # Compute acquired cash
    acq_cash = final_size * adj_price

    # Update fees
    fees_paid = acq_cash * fees + fixed_fees

    # Get final cash by subtracting costs
    if is_less_nb(acq_cash, fees_paid):
        # Can't fill
        if raise_reject:
            raise RejectedOrderError("Order rejected: Fees cannot be covered")
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Rejected, StatusInfo.CantCoverFees,
            log_record, log)
    final_cash = acq_cash - fees_paid

    # Update current cash and shares
    new_cash = cash_now + final_cash
    new_shares = add_nb(shares_now, -final_size)

    # Return filled order
    order_result = OrderResult(
        final_size,
        adj_price,
        fees_paid,
        OrderSide.Sell,
        OrderStatus.Filled,
        -1
    )
    if log:
        fill_res_log_nb(new_cash, new_shares, order_result, log_record)
    return new_cash, new_shares, order_result
@njit(cache=True)
def process_order_nb(cash_now, shares_now, val_price_now, value_now, order, log_record):
    """Process an order given current cash and share balance.

    Args:
        cash_now (float): Cash available to this asset or group with cash sharing.
        shares_now (float): Holdings of this particular asset.
        val_price_now (float): Valuation price for this particular asset.

            Used to convert `SizeType.TargetValue` to `SizeType.TargetShares`.
        value_now (float): Value of this asset or group with cash sharing.

            Used to convert `SizeType.TargetPercent` to `SizeType.TargetValue`.
        order (Order): See `vectorbt.portfolio.enums.Order`.
        log_record (log_dt): Record of type `vectorbt.portfolio.enums.log_dt`.

    Error is thrown if an input has value that is not expected.
    Order is ignored if its execution has no effect on current balance.
    Order is rejected if an input goes over a limit/restriction.
    """
    if order.log:
        fill_req_log_nb(cash_now, shares_now, val_price_now, value_now, order, log_record)

    # NaN size/price means "no order": ignore rather than reject.
    if np.isnan(order.size):
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Ignored, StatusInfo.SizeNaN,
            log_record, order.log)
    if np.isnan(order.price):
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Ignored, StatusInfo.PriceNaN,
            log_record, order.log)

    # Check variables
    if np.isnan(cash_now) or cash_now < 0:
        raise ValueError("cash_now must be greater than 0")
    if not np.isfinite(shares_now):
        raise ValueError("shares_now must be finite")

    # Check order
    if order.size_type < 0 or order.size_type >= len(SizeType):
        raise ValueError("order.size_type is invalid")
    if order.direction < 0 or order.direction >= len(Direction):
        raise ValueError("order.direction is invalid")
    if order.direction == Direction.LongOnly and shares_now < 0:
        raise ValueError("shares_now is negative but order.direction is Direction.LongOnly")
    if order.direction == Direction.ShortOnly and shares_now > 0:
        raise ValueError("shares_now is positive but order.direction is Direction.ShortOnly")
    if not np.isfinite(order.price) or order.price <= 0:
        raise ValueError("order.price must be finite and greater than 0")
    if not np.isfinite(order.fees) or order.fees < 0:
        raise ValueError("order.fees must be finite and 0 or greater")
    if not np.isfinite(order.fixed_fees) or order.fixed_fees < 0:
        raise ValueError("order.fixed_fees must be finite and 0 or greater")
    if not np.isfinite(order.slippage) or order.slippage < 0:
        raise ValueError("order.slippage must be finite and 0 or greater")
    if not np.isfinite(order.min_size) or order.min_size < 0:
        raise ValueError("order.min_size must be finite and 0 or greater")
    if order.max_size <= 0:
        raise ValueError("order.max_size must be greater than 0")
    if not np.isfinite(order.reject_prob) or order.reject_prob < 0 or order.reject_prob > 1:
        raise ValueError("order.reject_prob must be between 0 and 1")

    order_size = order.size
    order_size_type = order.size_type

    if order.direction == Direction.ShortOnly:
        # Positive size in short direction should be treated as negative
        order_size *= -1

    # Cascade of conversions: TargetPercent -> TargetValue -> TargetShares -> Shares.
    if order_size_type == SizeType.TargetPercent:
        # Target percentage of current value
        if np.isnan(value_now):
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Ignored, StatusInfo.ValueNaN,
                log_record, order.log)
        if value_now <= 0:
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Rejected, StatusInfo.ValueZeroNeg,
                log_record, order.log)

        order_size *= value_now
        order_size_type = SizeType.TargetValue

    if order_size_type == SizeType.TargetValue:
        # Target value
        if np.isinf(val_price_now) or val_price_now <= 0:
            raise ValueError("val_price_now must be finite and greater than 0")
        if np.isnan(val_price_now):
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Ignored, StatusInfo.ValPriceNaN,
                log_record, order.log)

        order_size = order_size / val_price_now
        order_size_type = SizeType.TargetShares

    if order_size_type == SizeType.TargetShares:
        # Target amount of shares
        order_size -= shares_now
        order_size_type = SizeType.Shares

    cash_limit = cash_now
    if order_size_type == SizeType.Percent:
        if order_size > 0:
            # Amount of available cash -> set upper limit to cash_now
            cash_limit = order_size * cash_now
            order_size = np.inf
        if order_size < 0:
            # Amount of available shares
            order_size *= abs(shares_now)
            order_size_type = SizeType.Shares

    if order.direction == Direction.ShortOnly or order.direction == Direction.All:
        if order_size < 0 and np.isinf(order_size):
            # Similar to going all long, going all short also depends upon current funds
            # If in short position, also subtract cash that covers this position (1:1)
            # This way, two successive -np.inf operations with same price will trigger only one short
            order_size = -2 * shares_now - cash_limit / order.price
            if order_size >= 0:
                if order.raise_reject:
                    raise RejectedOrderError("Order rejected: Not enough cash to short")
                return order_not_filled_nb(
                    cash_now, shares_now,
                    OrderStatus.Rejected, StatusInfo.NoCashShort,
                    log_record, order.log)

    if is_close_nb(order_size, 0):
        return order_not_filled_nb(
            cash_now, shares_now,
            OrderStatus.Ignored, StatusInfo.SizeZero,
            log_record, order.log)

    if abs(order_size) > order.max_size:
        if not order.allow_partial:
            if order.raise_reject:
                raise RejectedOrderError("Order rejected: Size is greater than maximum allowed")
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Rejected, StatusInfo.MaxSizeExceeded,
                log_record, order.log)

        # Partial fills allowed: clip to max_size, keeping the sign.
        order_size = np.sign(order_size) * order.max_size

    if order.reject_prob > 0:
        # Probabilistic rejection, e.g. to simulate exchange-side failures.
        if np.random.uniform(0, 1) < order.reject_prob:
            if order.raise_reject:
                raise RejectedOrderError("Random event happened")
            return order_not_filled_nb(
                cash_now, shares_now,
                OrderStatus.Rejected, StatusInfo.RandomEvent,
                log_record, order.log)

    if order_size > 0:
        # Positive final size -> buy (open/increase long, or reduce/close short).
        if order.direction == Direction.LongOnly or order.direction == Direction.All:
            if is_close_nb(cash_limit, 0):
                if order.raise_reject:
                    raise RejectedOrderError("Order rejected: Not enough cash to long")
                return order_not_filled_nb(
                    0., shares_now,
                    OrderStatus.Rejected, StatusInfo.NoCashLong,
                    log_record, order.log)
            if np.isinf(order_size) and np.isinf(cash_limit):
                raise ValueError("Attempt to go in long direction indefinitely. Set max_size or finite init_cash.")
        else:
            if is_close_nb(shares_now, 0):
                if order.raise_reject:
                    raise RejectedOrderError("Order rejected: No open position to reduce/close")
                return order_not_filled_nb(
                    cash_now, 0.,
                    OrderStatus.Rejected, StatusInfo.NoOpenPosition,
                    log_record, order.log)

        new_cash, new_shares, order_result = buy_shares_nb(
            cash_limit,
            shares_now,
            order_size,
            order.direction,
            order.price,
            order.fees,
            order.fixed_fees,
            order.slippage,
            order.min_size,
            order.allow_partial,
            order.raise_reject,
            log_record,
            order.log
        )
    else:
        # Negative final size -> sell (open/increase short, or reduce/close long).
        if order.direction == Direction.ShortOnly or order.direction == Direction.All:
            if np.isinf(order_size):
                raise ValueError("Attempt to go in short direction indefinitely. Set max_size or finite init_cash.")
        else:
            if is_close_nb(shares_now, 0):
                if order.raise_reject:
                    raise RejectedOrderError("Order rejected: No open position to reduce/close")
                return order_not_filled_nb(
                    cash_now, 0.,
                    OrderStatus.Rejected, StatusInfo.NoOpenPosition,
                    log_record, order.log)

        new_cash, new_shares, order_result = sell_shares_nb(
            cash_limit,
            shares_now,
            -order_size,
            order.direction,
            order.price,
            order.fees,
            order.fixed_fees,
            order.slippage,
            order.min_size,
            order.allow_partial,
            order.raise_reject,
            log_record,
            order.log
        )

    if is_less_nb(cash_limit, cash_now):
        # Return any cash that was held back by a Percent-type cash limit.
        new_cash += cash_now - cash_limit
    return new_cash, new_shares, order_result
@njit(cache=True)
def create_order_nb(size=np.nan,
                    size_type=SizeType.Shares,
                    direction=Direction.All,
                    price=np.nan,
                    fees=0.,
                    fixed_fees=0.,
                    slippage=0.,
                    min_size=0.,
                    max_size=np.inf,
                    reject_prob=0.,
                    allow_partial=True,
                    raise_reject=False,
                    log=False):
    """Convenience function to create an order with some defaults.

    All numeric fields are cast to float so the resulting `Order` tuple
    has a uniform layout regardless of the argument types passed in.
    NaN `size`/`price` produce an order that `process_order_nb` ignores.
    """
    return Order(
        float(size),
        size_type,
        direction,
        float(price),
        float(fees),
        float(fixed_fees),
        float(slippage),
        float(min_size),
        float(max_size),
        float(reject_prob),
        allow_partial,
        raise_reject,
        log
    )
@njit(cache=True)
def order_nothing():
    """Convenience function to order nothing.

    Returns the shared `NoOrder` sentinel understood by the simulator.
    """
    return NoOrder
@njit(cache=True)
def check_group_lens(group_lens, n_cols):
    """Validate that the per-group column counts sum to exactly `n_cols`."""
    total_cols = np.sum(group_lens)
    if total_cols != n_cols:
        raise ValueError("group_lens has incorrect total number of columns")
@njit(cache=True)
def check_group_init_cash(group_lens, n_cols, init_cash, cash_sharing):
    """Validate that `init_cash` has one entry per group (with cash sharing)
    or one entry per column (without)."""
    n_init_cash = len(init_cash)
    if cash_sharing:
        if n_init_cash != len(group_lens):
            raise ValueError("If cash sharing is enabled, init_cash must match the number of groups")
    else:
        if n_init_cash != n_cols:
            raise ValueError("If cash sharing is disabled, init_cash must match the number of columns")
@njit(cache=True)
def get_record_idx_nb(target_shape, i, col):
    """Get record index by position of order in the matrix (column-major)."""
    n_rows = target_shape[0]
    return i + col * n_rows
@njit(cache=True)
def is_grouped_nb(group_lens):
    """Check if columns are grouped, that is, more than one column per group."""
    for group_len in group_lens:
        if group_len > 1:
            return True
    return False
@njit(cache=True)
def shuffle_call_seq_nb(call_seq, group_lens):
    """Shuffle the call sequence array in place, row by row, independently
    within each group's column span."""
    group_start = 0
    for group in range(len(group_lens)):
        group_end = group_start + group_lens[group]
        for row in range(call_seq.shape[0]):
            np.random.shuffle(call_seq[row, group_start:group_end])
        group_start = group_end
@njit(cache=True)
def build_call_seq_nb(target_shape, group_lens, call_seq_type=CallSeqType.Default):
    """Build a new call sequence array.

    Produces a `target_shape` array where each group's columns carry the
    per-row call order 0..group_len-1 (Default), the reversed order
    (Reversed), or a per-row random permutation (Random).
    """
    if call_seq_type == CallSeqType.Reversed:
        # Ones with the group length subtracted at each group's last position,
        # cumsum'ed backwards, yields a descending 0-based sequence per group.
        out = np.full(target_shape[1], 1, dtype=np.int_)
        out[np.cumsum(group_lens)[1:] - group_lens[1:] - 1] -= group_lens[1:]
        out = np.cumsum(out[::-1])[::-1] - 1
        out = out * np.ones((target_shape[0], 1), dtype=np.int_)
        return out
    # Ones with the group length subtracted at each group's first position,
    # cumsum'ed forward, yields an ascending 0-based sequence per group.
    out = np.full(target_shape[1], 1, dtype=np.int_)
    out[np.cumsum(group_lens)[:-1]] -= group_lens[:-1]
    out = np.cumsum(out) - 1
    out = out * np.ones((target_shape[0], 1), dtype=np.int_)
    if call_seq_type == CallSeqType.Random:
        shuffle_call_seq_nb(out, group_lens)
    return out
def require_call_seq(call_seq):
    """Return `call_seq` as an aligned, owned, writeable, Fortran-ordered
    integer array (the layout the simulator requires)."""
    # np.require accepts the flags as a single string of characters.
    return np.require(call_seq, dtype=np.int_, requirements='AOWF')
def build_call_seq(target_shape, group_lens, call_seq_type=CallSeqType.Default):
    """Not compiled but faster version of `build_call_seq_nb`.

    Uses `np.broadcast_to` to avoid materializing identical rows; the
    result is passed through `require_call_seq` to restore writability.
    """
    call_seq = np.full(target_shape[1], 1, dtype=np.int_)
    if call_seq_type == CallSeqType.Reversed:
        # See build_call_seq_nb: backwards cumsum yields descending per-group order.
        call_seq[np.cumsum(group_lens)[1:] - group_lens[1:] - 1] -= group_lens[1:]
        call_seq = np.cumsum(call_seq[::-1])[::-1] - 1
    else:
        # Forward cumsum with resets at group starts yields ascending order.
        call_seq[np.cumsum(group_lens[:-1])] -= group_lens[:-1]
        call_seq = np.cumsum(call_seq) - 1
    call_seq = np.broadcast_to(call_seq, target_shape)
    if call_seq_type == CallSeqType.Random:
        # Shuffling mutates in place, so make the array writable first.
        call_seq = require_call_seq(call_seq)
        shuffle_call_seq_nb(call_seq, group_lens)
    return require_call_seq(call_seq)
@njit(cache=True)
def empty_prep_nb(context, *args):
    """Preparation function that forwards received arguments down the stack.

    The `context` itself is intentionally unused; only `*args` is passed on.
    """
    return args
@njit(cache=True)
def get_group_value_nb(from_col, to_col, cash_now, last_shares, last_val_price):
    """Compute a group's total value: cash plus each non-zero holding
    marked at its last valuation price."""
    total_value = cash_now
    for col in range(from_col, to_col):
        if last_shares[col] != 0:
            total_value += last_shares[col] * last_val_price[col]
    return total_value
@njit(cache=True)
def get_group_value_ctx_nb(sc_oc):
    """Get group value from context.

    Accepts `vectorbt.portfolio.enums.SegmentContext` and `vectorbt.portfolio.enums.OrderContext`.

    Best called once from `segment_prep_func_nb`.
    To set the valuation price, change `last_val_price` of the context in-place.

    !!! note
        Cash sharing must be enabled."""
    if not sc_oc.cash_sharing:
        raise ValueError("Cash sharing must be enabled")
    # Delegate to the array-based version using the context's group slice.
    return get_group_value_nb(
        sc_oc.from_col,
        sc_oc.to_col,
        sc_oc.last_cash[sc_oc.group],
        sc_oc.last_shares,
        sc_oc.last_val_price
    )
@njit(cache=True)
def approx_order_value_nb(size, size_type, cash_now, shares_now, val_price_now, value_now, direction):
    """Approximate the (signed) cash value an order would move, given its
    size type; returns NaN for unsupported size types."""
    adj_size = size
    if direction == Direction.ShortOnly:
        adj_size = -size
    holding_value = shares_now * val_price_now
    if size_type == SizeType.Shares:
        return adj_size * val_price_now
    elif size_type == SizeType.Percent:
        if adj_size >= 0:
            return adj_size * cash_now
        return adj_size * abs(holding_value)
    elif size_type == SizeType.TargetShares:
        return adj_size * val_price_now - holding_value
    elif size_type == SizeType.TargetValue:
        return adj_size - holding_value
    elif size_type == SizeType.TargetPercent:
        return adj_size * value_now - holding_value
    return np.nan
@njit(cache=True)
def auto_call_seq_ctx_nb(sc, size, size_type, direction, temp_float_arr):
    """Generate call sequence based on order value dynamically, for example, to rebalance.

    Accepts `vectorbt.portfolio.enums.SegmentContext`.

    Arrays `size`, `size_type`, `direction` and `temp_float_arr` should match the number
    of columns in the group. Array `temp_float_arr` should be empty and will contain
    sorted order values after execution. `sc.call_seq_now` is reordered in place so
    that sell orders (most negative value) come before buy orders, freeing up cash.

    Best called once from `segment_prep_func_nb`.

    !!! note
        Cash sharing must be enabled and `call_seq_now` should follow `CallSeqType.Default`."""
    if not sc.cash_sharing:
        raise ValueError("Cash sharing must be enabled")
    group_value_now = get_group_value_ctx_nb(sc)
    group_len = sc.to_col - sc.from_col
    for k in range(group_len):
        if sc.call_seq_now[k] != k:
            raise ValueError("call_seq_now should follow CallSeqType.Default")
        col = sc.from_col + k
        if sc.cash_sharing:
            cash_now = sc.last_cash[sc.group]
        else:
            cash_now = sc.last_cash[col]
        # Estimate how much cash each column's order would move.
        temp_float_arr[k] = approx_order_value_nb(
            size[k],
            size_type[k],
            cash_now,
            sc.last_shares[col],
            sc.last_val_price[col],
            group_value_now,
            direction[k]
        )
    # Sort by order value
    insert_argsort_nb(temp_float_arr, sc.call_seq_now)
@njit
def simulate_nb(target_shape, close, group_lens, init_cash, cash_sharing, call_seq, active_mask,
                prep_func_nb, prep_args, group_prep_func_nb, group_prep_args, segment_prep_func_nb,
                segment_prep_args, order_func_nb, order_args, max_orders, max_logs):
    """Simulate a portfolio by generating and filling orders.
    Starting with initial cash `init_cash`, iterates over each group and column over shape `target_shape`,
    and for each data point, generates an order using `order_func_nb`. Tries then to fulfill that
    order. If unsuccessful due to insufficient cash/shares, always orders the available fraction.
    Updates then the current cash and shares balance.
    Returns order records of layout `vectorbt.portfolio.enums.order_dt` and log records of layout
    `vectorbt.portfolio.enums.log_dt`.
    As opposed to `simulate_row_wise_nb`, order processing happens in row-major order, that is,
    from top to bottom slower (along time axis) and from left to right faster (along asset axis).
    See [Glossary](https://numpy.org/doc/stable/glossary.html).
    Args:
        target_shape (tuple): Target shape.
            A tuple with exactly two elements: the number of steps and columns.
        close (array_like of float): Reference price, such as close.
            Should have shape `target_shape`.
        group_lens (array_like of int): Column count per group.
            Even if columns are not grouped, `group_lens` should contain ones - one column per group.
        init_cash (array_like of float): Initial capital per column, or per group if cash sharing is enabled.
            If `cash_sharing` is True, should have shape `(target_shape[0], group_lens.shape[0])`.
            Otherwise, should have shape `target_shape`.
        cash_sharing (bool): Whether to share cash within the same group.
        call_seq (array_like of int): Default sequence of calls per row and group.
            Should have shape `target_shape` and each value indicate the index of a column in a group.
            !!! note
                To use `auto_call_seq_ctx_nb`, should be of `CallSeqType.Default`.
        active_mask (array_like of bool): Mask of whether a particular segment should be executed.
            A segment is simply a sequence of `order_func_nb` calls under the same group and row.
            Should have shape `(target_shape[0], group_lens.shape[0])`.
        prep_func_nb (callable): Simulation preparation function.
            Can be used for creation of global arrays and setting the seed, and is executed at the
            beginning of the simulation. It should accept `*prep_args`, and return a tuple of any
            content, which is then passed to `group_prep_func_nb`.
        prep_args (tuple): Packed arguments passed to `prep_func_nb`.
        group_prep_func_nb (callable): Group preparation function.
            Executed before each group. Should accept the current group context
            `vectorbt.portfolio.enums.GroupContext`, unpacked tuple from `prep_func_nb`, and
            `*group_prep_args`. Should return a tuple of any content, which is then passed to
            `segment_prep_func_nb`.
        group_prep_args (tuple): Packed arguments passed to `group_prep_func_nb`.
        segment_prep_func_nb (callable): Segment preparation function.
            Executed before each row in a group. Should accept the current segment context
            `vectorbt.portfolio.enums.SegmentContext`, unpacked tuple from `group_prep_func_nb`,
            and `*segment_prep_args`. Should return a tuple of any content, which is then
            passed to `order_func_nb`.
            !!! note
                To change the call sequence of the segment, access `SegmentContext.call_seq_now`
                and change it in-place. Make sure to not generate any new arrays as it may
                negatively impact performance. Assigning `SegmentContext.call_seq_now` is not allowed.
            !!! note
                Use `last_val_price` to manipulate group valuation. By default, `last_val_price`
                contains the last `close` for a column. You can change it in-place.
                The column/group is then valuated after `segment_prep_func_nb`, and the value is
                passed as `value_now` to `order_func_nb` and internally used for converting
                `SizeType.TargetPercent` and `SizeType.TargetValue` to `SizeType.TargetShares`.
        segment_prep_args (tuple): Packed arguments passed to `segment_prep_func_nb`.
        order_func_nb (callable): Order generation function.
            Used for either generating an order or skipping. Should accept the current order context
            `vectorbt.portfolio.enums.OrderContext`, unpacked tuple from `segment_prep_func_nb`, and
            `*order_args`. Should either return `vectorbt.portfolio.enums.Order`, or
            `vectorbt.portfolio.enums.NoOrder` to do nothing.
        order_args (tuple): Arguments passed to `order_func_nb`.
        max_orders (int): Size of the order records array.
        max_logs (int): Size of the log records array.
    !!! note
        Broadcasting isn't done automatically: you should either broadcast inputs before passing them
        to `order_func_nb`, or use flexible indexing - `vectorbt.base.reshape_fns.flex_choose_i_and_col_nb`
        together with `vectorbt.base.reshape_fns.flex_select_nb`.
        Also remember that indexing of 2-dim arrays in vectorbt follows that of pandas: `a[i, col]`.
    !!! note
        Function `group_prep_func_nb` is only called if there is at least on active segment in
        the group. Functions `segment_prep_func_nb` and `order_func_nb` are only called if their
        segment is active. If the main task of `group_prep_func_nb` is to activate/deactivate segments,
        all segments should be activated by default to allow `group_prep_func_nb` to be called.
    !!! warning
        You can only safely access data of columns that are to the left of the current group and
        rows that are to the top of the current row within the same group. Other data points have
        not been processed yet and thus empty. Accessing them will not trigger any errors or warnings,
        but provide you with arbitrary data (see [np.empty](https://numpy.org/doc/stable/reference/generated/numpy.empty.html)).
    ## Example
    Create a group of three assets together sharing 100$ and simulate an equal-weighted portfolio
    that rebalances every second tick, all without leaving Numba:
    ```python-repl
    >>> import numpy as np
    >>> import pandas as pd
    >>> from numba import njit
    >>> from vectorbt.generic.plotting import Scatter
    >>> from vectorbt.records.nb import col_map_nb
    >>> from vectorbt.portfolio.enums import SizeType, Direction
    >>> from vectorbt.portfolio.nb import (
    ...     create_order_nb,
    ...     simulate_nb,
    ...     build_call_seq,
    ...     auto_call_seq_ctx_nb,
    ...     share_flow_nb,
    ...     shares_nb,
    ...     holding_value_nb
    ... )
    >>> @njit
    ... def prep_func_nb(simc):  # do nothing
    ...     print('preparing simulation')
    ...     return ()
    >>> @njit
    ... def group_prep_func_nb(gc):
    ...     '''Define empty arrays for each group.'''
    ...     print('\\tpreparing group', gc.group)
    ...     # Try to create new arrays as rarely as possible
    ...     size = np.empty(gc.group_len, dtype=np.float_)
    ...     size_type = np.empty(gc.group_len, dtype=np.int_)
    ...     direction = np.empty(gc.group_len, dtype=np.int_)
    ...     temp_float_arr = np.empty(gc.group_len, dtype=np.float_)
    ...     return size, size_type, direction, temp_float_arr
    >>> @njit
    ... def segment_prep_func_nb(sc, size, size_type, direction, temp_float_arr):
    ...     '''Perform rebalancing at each segment.'''
    ...     print('\\t\\tpreparing segment', sc.i, '(row)')
    ...     for k in range(sc.group_len):
    ...         col = sc.from_col + k
    ...         size[k] = 1 / sc.group_len
    ...         size_type[k] = SizeType.TargetPercent
    ...         direction[k] = Direction.LongOnly  # long positions only
    ...         # Here we use order price instead of previous close to valuate the assets
    ...         sc.last_val_price[col] = sc.close[sc.i, col]
    ...     # Reorder call sequence such that selling orders come first and buying last
    ...     auto_call_seq_ctx_nb(sc, size, size_type, direction, temp_float_arr)
    ...     return size, size_type, direction
    >>> @njit
    ... def order_func_nb(oc, size, size_type, direction, fees, fixed_fees, slippage):
    ...     '''Place an order.'''
    ...     print('\\t\\t\\trunning order', oc.call_idx, 'at column', oc.col)
    ...     col_i = oc.call_seq_now[oc.call_idx]  # or col - from_col
    ...     return create_order_nb(
    ...         size=size[col_i],
    ...         size_type=size_type[col_i],
    ...         direction=direction[col_i],
    ...         price=oc.close[oc.i, oc.col],
    ...         fees=fees, fixed_fees=fixed_fees, slippage=slippage
    ...     )
    >>> target_shape = (5, 3)
    >>> np.random.seed(42)
    >>> close = np.random.uniform(1, 10, size=target_shape)
    >>> group_lens = np.array([3])  # one group of three columns
    >>> init_cash = np.array([100.])  # one capital per group
    >>> cash_sharing = True
    >>> call_seq = build_call_seq(target_shape, group_lens)  # will be overridden
    >>> active_mask = np.array([True, False, True, False, True])[:, None]
    >>> active_mask = np.copy(np.broadcast_to(active_mask, target_shape))
    >>> fees = 0.001
    >>> fixed_fees = 1.
    >>> slippage = 0.001
    >>> order_records, log_records = simulate_nb(
    ...     target_shape,
    ...     close,
    ...     group_lens,
    ...     init_cash,
    ...     cash_sharing,
    ...     call_seq,
    ...     active_mask,
    ...     prep_func_nb, (),
    ...     group_prep_func_nb, (),
    ...     segment_prep_func_nb, (),
    ...     order_func_nb, (fees, fixed_fees, slippage),
    ...     target_shape[0] * target_shape[1], 0
    ... )
    preparing simulation
        preparing group 0
            preparing segment 0 (row)
                running order 0 at column 0
                running order 1 at column 1
                running order 2 at column 2
            preparing segment 2 (row)
                running order 0 at column 1
                running order 1 at column 2
                running order 2 at column 0
            preparing segment 4 (row)
                running order 0 at column 0
                running order 1 at column 2
                running order 2 at column 1
    >>> pd.DataFrame.from_records(order_records)
       id  idx  col       size     price      fees  side
    0   0    0    0   7.626262  4.375232  1.033367     0
    1   1    0    1   3.488053  9.565985  1.033367     0
    2   2    0    2   3.972040  7.595533  1.030170     0
    3   3    2    1   0.920352  8.786790  1.008087     1
    4   4    2    2   0.448747  6.403625  1.002874     1
    5   5    2    0   5.210115  1.524275  1.007942     0
    6   6    4    0   7.899568  8.483492  1.067016     1
    7   7    4    2  12.378281  2.639061  1.032667     0
    8   8    4    1  10.713236  2.913963  1.031218     0
    >>> call_seq
    array([[0, 1, 2],
           [0, 1, 2],
           [1, 2, 0],
           [0, 1, 2],
           [0, 2, 1]])
    >>> col_map = col_map_nb(order_records['col'], target_shape[1])
    >>> share_flow = share_flow_nb(target_shape, order_records, col_map, Direction.All)
    >>> shares = shares_nb(share_flow)
    >>> holding_value = holding_value_nb(close, shares)
    >>> Scatter(data=holding_value).fig
    ```
    
    Note that the last order in a group with cash sharing is always disadvantaged
    as it has a bit less funds than the previous orders due to costs, which are not
    included when valuating the group.
    """
    check_group_lens(group_lens, target_shape[1])
    check_group_init_cash(group_lens, target_shape[1], init_cash, cash_sharing)
    # Pre-allocate record arrays: numba cannot grow arrays dynamically
    order_records = np.empty(max_orders, dtype=order_dt)
    ridx = 0  # index of the next free order record
    if max_logs == 0:
        max_logs = 1  # keep at least one row so log_records[lidx] below is always addressable
    log_records = np.empty(max_logs, dtype=log_dt)
    lidx = 0  # index of the next free log record
    # Running state: cash per group (or per column), shares and valuation price per column
    last_cash = init_cash.astype(np.float_)
    last_shares = np.full(target_shape[1], 0., dtype=np.float_)
    last_val_price = np.full_like(last_shares, np.nan, dtype=np.float_)
    # Run a function to prepare the simulation
    simc = SimulationContext(
        target_shape,
        close,
        group_lens,
        init_cash,
        cash_sharing,
        call_seq,
        active_mask,
        order_records,
        log_records,
        last_cash,
        last_shares,
        last_val_price
    )
    prep_out = prep_func_nb(simc, *prep_args)
    from_col = 0
    for group in range(len(group_lens)):
        # Is this group active?
        if np.any(active_mask[:, group]):
            to_col = from_col + group_lens[group]
            group_len = to_col - from_col
            # Run a function to preprocess this entire group
            gc = GroupContext(
                target_shape,
                close,
                group_lens,
                init_cash,
                cash_sharing,
                call_seq,
                active_mask,
                order_records[:ridx],
                log_records[:lidx],
                last_cash,
                last_shares,
                last_val_price,
                group,
                group_len,
                from_col,
                to_col
            )
            group_prep_out = group_prep_func_nb(gc, *prep_out, *group_prep_args)
            for i in range(target_shape[0]):
                # Is this row segment active?
                if active_mask[i, group]:
                    # Update valuation price
                    if i > 0:
                        # By default, valuate at the previous close; segment_prep_func_nb may override
                        for col in range(from_col, to_col):
                            last_val_price[col] = close[i - 1, col]
                    # Run a function to preprocess this group within this row
                    call_seq_now = call_seq[i, from_col:to_col]
                    sc = SegmentContext(
                        target_shape,
                        close,
                        group_lens,
                        init_cash,
                        cash_sharing,
                        call_seq,
                        active_mask,
                        order_records[:ridx],
                        log_records[:lidx],
                        last_cash,
                        last_shares,
                        last_val_price,
                        i,
                        group,
                        group_len,
                        from_col,
                        to_col,
                        call_seq_now
                    )
                    segment_prep_out = segment_prep_func_nb(sc, *group_prep_out, *segment_prep_args)
                    # Get running values per group
                    if cash_sharing:
                        cash_now = last_cash[group]
                        value_now = get_group_value_nb(from_col, to_col, cash_now, last_shares, last_val_price)
                    for k in range(group_len):
                        # Map the call index (position in the sequence) to a column
                        col_i = call_seq_now[k]
                        if col_i >= group_len:
                            raise ValueError("Call index exceeds bounds of the group")
                        col = from_col + col_i
                        # Get running values per column
                        shares_now = last_shares[col]
                        val_price_now = last_val_price[col]
                        if not cash_sharing:
                            cash_now = last_cash[col]
                            value_now = cash_now
                            if shares_now != 0:
                                value_now += shares_now * val_price_now
                        # Generate the next order
                        oc = OrderContext(
                            target_shape,
                            close,
                            group_lens,
                            init_cash,
                            cash_sharing,
                            call_seq,
                            active_mask,
                            order_records[:ridx],
                            log_records[:lidx],
                            last_cash,
                            last_shares,
                            last_val_price,
                            i,
                            group,
                            group_len,
                            from_col,
                            to_col,
                            call_seq_now,
                            col,
                            k,
                            cash_now,
                            shares_now,
                            val_price_now,
                            value_now
                        )
                        order = order_func_nb(oc, *segment_prep_out, *order_args)
                        # Process the order
                        if lidx > len(log_records) - 1:
                            raise IndexError("log_records index out of range")
                        cash_now, shares_now, order_result = process_order_nb(
                            cash_now, shares_now, val_price_now, value_now, order, log_records[lidx])
                        if order.log:
                            # Add log metadata
                            log_records[lidx]['id'] = lidx
                            log_records[lidx]['idx'] = i
                            log_records[lidx]['col'] = col
                            log_records[lidx]['group'] = group
                            if order_result.status == OrderStatus.Filled:
                                log_records[lidx]['order_id'] = ridx
                            else:
                                log_records[lidx]['order_id'] = -1
                            lidx += 1
                        if order_result.status == OrderStatus.Filled:
                            # Add order metadata
                            if ridx > len(order_records) - 1:
                                raise IndexError("order_records index out of range")
                            order_records[ridx]['id'] = ridx
                            order_records[ridx]['idx'] = i
                            order_records[ridx]['col'] = col
                            order_records[ridx]['size'] = order_result.size
                            order_records[ridx]['price'] = order_result.price
                            order_records[ridx]['fees'] = order_result.fees
                            order_records[ridx]['side'] = order_result.side
                            ridx += 1
                        # Now becomes last
                        if cash_sharing:
                            last_cash[group] = cash_now
                        else:
                            last_cash[col] = cash_now
                        last_shares[col] = shares_now
            # NOTE(review): indentation reconstructed — from_col appears to advance only
            # for active groups; confirm against upstream that a fully inactive group
            # cannot occur here, otherwise column offsets of later groups would drift
            from_col = to_col
    # Trim records down to the filled part
    return order_records[:ridx], log_records[:lidx]
@njit
def simulate_row_wise_nb(target_shape, close, group_lens, init_cash, cash_sharing, call_seq,
                         active_mask, prep_func_nb, prep_args, row_prep_func_nb, row_prep_args,
                         segment_prep_func_nb, segment_prep_args, order_func_nb, order_args,
                         max_orders, max_logs):
    """Same as `simulate_nb`, but iterates using row-major order, with the rows
    changing fastest, and the columns/groups changing slowest.
    The main difference is that instead of `group_prep_func_nb` it now exposes `row_prep_func_nb`,
    which is executed per entire row. It should accept `vectorbt.portfolio.enums.RowContext`.
    !!! note
        Function `row_prep_func_nb` is only called if there is at least on active segment in
        the row. Functions `segment_prep_func_nb` and `order_func_nb` are only called if their
        segment is active. If the main task of `row_prep_func_nb` is to activate/deactivate segments,
        all segments should be activated by default to allow `row_prep_func_nb` to be called.
    !!! warning
        You can only safely access data points that are to the left of the current group and
        rows that are to the top of the current row.
    ## Example
    Running the same example as in `simulate_nb` but replacing `group_prep_func_nb` for
    `row_prep_func_nb` gives the same results but now the following call hierarchy:
    ```python-repl
    preparing simulation
        preparing row 0
            preparing segment 0 (group)
                running order 0 at column 0
                running order 1 at column 1
                running order 2 at column 2
        preparing row 2
            preparing segment 0 (group)
                running order 0 at column 1
                running order 1 at column 2
                running order 2 at column 0
        preparing row 4
            preparing segment 0 (group)
                running order 0 at column 0
                running order 1 at column 2
                running order 2 at column 1
    ```
    Note, however, that we cannot create NumPy arrays per group anymore as there is no
    `group_prep_func_nb`, so you would need to move this part to `prep_func_nb`,
    make arrays wider, and use only the part of the array that corresponds to the current group.
    """
    check_group_lens(group_lens, target_shape[1])
    check_group_init_cash(group_lens, target_shape[1], init_cash, cash_sharing)
    # Pre-allocate record arrays: numba cannot grow arrays dynamically
    order_records = np.empty(max_orders, dtype=order_dt)
    ridx = 0  # index of the next free order record
    if max_logs == 0:
        max_logs = 1  # keep at least one row so log_records[lidx] below is always addressable
    log_records = np.empty(max_logs, dtype=log_dt)
    lidx = 0  # index of the next free log record
    # Running state: cash per group (or per column), shares and valuation price per column
    last_cash = init_cash.astype(np.float_)
    last_shares = np.full(target_shape[1], 0., dtype=np.float_)
    last_val_price = np.full_like(last_shares, np.nan, dtype=np.float_)
    # Run a function to prepare the simulation
    simc = SimulationContext(
        target_shape,
        close,
        group_lens,
        init_cash,
        cash_sharing,
        call_seq,
        active_mask,
        order_records,
        log_records,
        last_cash,
        last_shares,
        last_val_price
    )
    prep_out = prep_func_nb(simc, *prep_args)
    for i in range(target_shape[0]):
        # Is this row active?
        if np.any(active_mask[i, :]):
            # Update valuation price
            if i > 0:
                # By default, valuate at the previous close; segment_prep_func_nb may override
                for col in range(target_shape[1]):
                    last_val_price[col] = close[i - 1, col]
            # Run a function to preprocess this entire row
            rc = RowContext(
                target_shape,
                close,
                group_lens,
                init_cash,
                cash_sharing,
                call_seq,
                active_mask,
                order_records[:ridx],
                log_records[:lidx],
                last_cash,
                last_shares,
                last_val_price,
                i
            )
            row_prep_out = row_prep_func_nb(rc, *prep_out, *row_prep_args)
            from_col = 0
            for group in range(len(group_lens)):
                # Is this group segment active?
                if active_mask[i, group]:
                    to_col = from_col + group_lens[group]
                    group_len = to_col - from_col
                    # Run a function to preprocess this row within this group
                    call_seq_now = call_seq[i, from_col:to_col]
                    sc = SegmentContext(
                        target_shape,
                        close,
                        group_lens,
                        init_cash,
                        cash_sharing,
                        call_seq,
                        active_mask,
                        order_records[:ridx],
                        log_records[:lidx],
                        last_cash,
                        last_shares,
                        last_val_price,
                        i,
                        group,
                        group_len,
                        from_col,
                        to_col,
                        call_seq_now
                    )
                    segment_prep_out = segment_prep_func_nb(sc, *row_prep_out, *segment_prep_args)
                    # Get running values per group
                    if cash_sharing:
                        cash_now = last_cash[group]
                        value_now = get_group_value_nb(from_col, to_col, cash_now, last_shares, last_val_price)
                    for k in range(group_len):
                        # Map the call index (position in the sequence) to a column
                        col_i = call_seq_now[k]
                        if col_i >= group_len:
                            raise ValueError("Call index exceeds bounds of the group")
                        col = from_col + col_i
                        # Get running values per column
                        shares_now = last_shares[col]
                        val_price_now = last_val_price[col]
                        if not cash_sharing:
                            cash_now = last_cash[col]
                            value_now = cash_now
                            if shares_now != 0:
                                value_now += shares_now * val_price_now
                        # Generate the next order
                        oc = OrderContext(
                            target_shape,
                            close,
                            group_lens,
                            init_cash,
                            cash_sharing,
                            call_seq,
                            active_mask,
                            order_records[:ridx],
                            log_records[:lidx],
                            last_cash,
                            last_shares,
                            last_val_price,
                            i,
                            group,
                            group_len,
                            from_col,
                            to_col,
                            call_seq_now,
                            col,
                            k,
                            cash_now,
                            shares_now,
                            val_price_now,
                            value_now
                        )
                        order = order_func_nb(oc, *segment_prep_out, *order_args)
                        # Process the order
                        if lidx > len(log_records) - 1:
                            raise IndexError("log_records index out of range")
                        cash_now, shares_now, order_result = process_order_nb(
                            cash_now, shares_now, val_price_now, value_now, order, log_records[lidx])
                        if order.log:
                            # Add log metadata
                            log_records[lidx]['id'] = lidx
                            log_records[lidx]['idx'] = i
                            log_records[lidx]['col'] = col
                            log_records[lidx]['group'] = group
                            if order_result.status == OrderStatus.Filled:
                                log_records[lidx]['order_id'] = ridx
                            else:
                                log_records[lidx]['order_id'] = -1
                            lidx += 1
                        if order_result.status == OrderStatus.Filled:
                            # Add order metadata
                            if ridx > len(order_records) - 1:
                                raise IndexError("order_records index out of range")
                            order_records[ridx]['id'] = ridx
                            order_records[ridx]['idx'] = i
                            order_records[ridx]['col'] = col
                            order_records[ridx]['size'] = order_result.size
                            order_records[ridx]['price'] = order_result.price
                            order_records[ridx]['fees'] = order_result.fees
                            order_records[ridx]['side'] = order_result.side
                            ridx += 1
                        # Now becomes last
                        if cash_sharing:
                            last_cash[group] = cash_now
                        else:
                            last_cash[col] = cash_now
                        last_shares[col] = shares_now
                    # NOTE(review): indentation reconstructed — from_col appears to advance
                    # only for active segments; confirm against upstream that an inactive
                    # group within an active row cannot occur, otherwise later groups in
                    # this row would read the wrong columns
                    from_col = to_col
    # Trim records down to the filled part
    return order_records[:ridx], log_records[:lidx]
@njit(cache=True)
def simulate_from_orders_nb(target_shape, group_lens, init_cash, call_seq, auto_call_seq,
                            size, size_type, direction, price, fees, fixed_fees, slippage,
                            min_size, max_size, reject_prob, allow_partial, raise_reject,
                            log, val_price, max_orders, max_logs, flex_2d):
    """Adaptation of `simulate_nb` for simulation based on orders.
    Utilizes flexible broadcasting.
    !!! note
        Should be only grouped if cash sharing is enabled.
        If `auto_call_seq` is True, make sure that `call_seq` follows `CallSeqType.Default`."""
    check_group_lens(group_lens, target_shape[1])
    # Grouping implies cash sharing here (see note in the docstring)
    cash_sharing = is_grouped_nb(group_lens)
    check_group_init_cash(group_lens, target_shape[1], init_cash, cash_sharing)
    # Pre-allocate record arrays: numba cannot grow arrays dynamically
    order_records = np.empty(max_orders, dtype=order_dt)
    ridx = 0  # index of the next free order record
    if max_logs == 0:
        max_logs = 1  # keep at least one row so log_records[lidx] below is always addressable
    log_records = np.empty(max_logs, dtype=log_dt)
    lidx = 0  # index of the next free log record
    # Running state: cash per group (or per column) and shares per column
    last_cash = init_cash.astype(np.float_)
    last_shares = np.full(target_shape[1], 0., dtype=np.float_)
    # Scratch buffer for per-column order values (used by auto call sequencing)
    temp_order_value = np.empty(target_shape[1], dtype=np.float_)
    from_col = 0
    for group in range(len(group_lens)):
        to_col = from_col + group_lens[group]
        group_len = to_col - from_col
        # Get running values per group
        if cash_sharing:
            cash_now = last_cash[group]
        for i in range(target_shape[0]):
            # Calculate group value and rearrange if cash sharing is enabled
            if cash_sharing:
                # Same as get_group_value_ctx_nb but with flexible indexing
                value_now = cash_now
                for k in range(group_len):
                    col = from_col + k
                    if last_shares[col] != 0:
                        _val_price = flex_select_auto_nb(i, col, val_price, flex_2d)
                        value_now += last_shares[col] * _val_price
                # Dynamically sort by order value -> selling comes first to release funds early
                if auto_call_seq:
                    # Same as sort_by_order_value_ctx_nb but with flexible indexing
                    for k in range(group_len):
                        col = from_col + k
                        temp_order_value[k] = approx_order_value_nb(
                            flex_select_auto_nb(i, col, size, flex_2d),
                            flex_select_auto_nb(i, col, size_type, flex_2d),
                            cash_now,
                            last_shares[col],
                            flex_select_auto_nb(i, col, val_price, flex_2d),
                            value_now,
                            flex_select_auto_nb(i, col, direction, flex_2d)
                        )
                    # Sort by order value
                    insert_argsort_nb(temp_order_value[:group_len], call_seq[i, from_col:to_col])
            for k in range(group_len):
                col = from_col + k
                if cash_sharing:
                    # Resolve the call sequence: call_seq[i, col] == call_seq[i, from_col + k]
                    col_i = call_seq[i, col]
                    if col_i >= group_len:
                        raise ValueError("Call index exceeds bounds of the group")
                    col = from_col + col_i
                # Get running values per column
                shares_now = last_shares[col]
                val_price_now = flex_select_auto_nb(i, col, val_price, flex_2d)
                if not cash_sharing:
                    cash_now = last_cash[col]
                    value_now = cash_now
                    if shares_now != 0:
                        value_now += shares_now * val_price_now
                # Generate the next order
                order = create_order_nb(
                    size=flex_select_auto_nb(i, col, size, flex_2d),
                    size_type=flex_select_auto_nb(i, col, size_type, flex_2d),
                    direction=flex_select_auto_nb(i, col, direction, flex_2d),
                    price=flex_select_auto_nb(i, col, price, flex_2d),
                    fees=flex_select_auto_nb(i, col, fees, flex_2d),
                    fixed_fees=flex_select_auto_nb(i, col, fixed_fees, flex_2d),
                    slippage=flex_select_auto_nb(i, col, slippage, flex_2d),
                    min_size=flex_select_auto_nb(i, col, min_size, flex_2d),
                    max_size=flex_select_auto_nb(i, col, max_size, flex_2d),
                    reject_prob=flex_select_auto_nb(i, col, reject_prob, flex_2d),
                    allow_partial=flex_select_auto_nb(i, col, allow_partial, flex_2d),
                    raise_reject=flex_select_auto_nb(i, col, raise_reject, flex_2d),
                    log=flex_select_auto_nb(i, col, log, flex_2d)
                )
                # Process the order
                if lidx > len(log_records) - 1:
                    raise IndexError("log_records index out of range")
                cash_now, shares_now, order_result = process_order_nb(
                    cash_now, shares_now, val_price_now, value_now, order, log_records[lidx])
                if order.log:
                    # Add log metadata
                    log_records[lidx]['id'] = lidx
                    log_records[lidx]['idx'] = i
                    log_records[lidx]['col'] = col
                    log_records[lidx]['group'] = group
                    if order_result.status == OrderStatus.Filled:
                        log_records[lidx]['order_id'] = ridx
                    else:
                        log_records[lidx]['order_id'] = -1
                    lidx += 1
                if order_result.status == OrderStatus.Filled:
                    # Add order metadata
                    if ridx > len(order_records) - 1:
                        raise IndexError("order_records index out of range")
                    order_records[ridx]['id'] = ridx
                    order_records[ridx]['idx'] = i
                    order_records[ridx]['col'] = col
                    order_records[ridx]['size'] = order_result.size
                    order_records[ridx]['price'] = order_result.price
                    order_records[ridx]['fees'] = order_result.fees
                    order_records[ridx]['side'] = order_result.side
                    ridx += 1
                # Now becomes last
                if cash_sharing:
                    last_cash[group] = cash_now
                else:
                    last_cash[col] = cash_now
                last_shares[col] = shares_now
        from_col = to_col
    # Trim records down to the filled part
    return order_records[:ridx], log_records[:lidx]
@njit(cache=True)
def signals_get_size_nb(shares_now, is_entry, is_exit, size, size_type, direction,
                        accumulate, conflict_mode, close_first):
    """Get order size given signals.

    Translates an entry/exit signal pair into a signed order size given the current
    position `shares_now`, the allowed `direction`, and the accumulation and
    conflict-resolution settings.

    Returns an `(order_size, size_type)` tuple; `size_type` is switched to
    `SizeType.Shares` whenever an exact number of held shares must be closed."""
    if size_type != SizeType.Shares and size_type != SizeType.Percent:
        raise ValueError("Only SizeType.Shares and SizeType.Percent are supported")
    order_size = 0.
    abs_shares_now = abs(shares_now)
    abs_size = abs(size)
    if is_entry and is_exit:
        # Conflict
        if conflict_mode == ConflictMode.Entry:
            # Ignore exit signal
            is_exit = False
        elif conflict_mode == ConflictMode.Exit:
            # Ignore entry signal
            is_entry = False
        elif conflict_mode == ConflictMode.Opposite:
            # Take the signal opposite to the position we are in
            if shares_now == 0:
                # Cannot decide -> ignore
                is_entry = False
                is_exit = False
            else:
                if direction == Direction.All:
                    if shares_now > 0:
                        is_entry = False
                    elif shares_now < 0:
                        is_exit = False
                else:
                    # One-directional trading: an open position can only be exited
                    is_entry = False
        else:
            # Any other mode (presumably ConflictMode.Ignore — confirm): drop both signals
            is_entry = False
            is_exit = False
    if is_entry:
        if direction == Direction.All:
            # Behaves like Direction.LongOnly
            if accumulate:
                order_size = abs_size
            else:
                if shares_now < 0:
                    # Reverse short position
                    if close_first:
                        # Close the short first; the long opens on a later signal
                        order_size = abs_shares_now
                        size_type = SizeType.Shares
                    else:
                        if size_type == SizeType.Percent:
                            raise ValueError("SizeType.Percent does not support position reversal")
                        # Close the short and open the long in one order
                        order_size = abs_shares_now + abs_size
                elif shares_now == 0:
                    # Open long position
                    order_size = abs_size
                # shares_now > 0 without accumulation: already long -> order_size stays 0
        elif direction == Direction.LongOnly:
            if shares_now == 0 or accumulate:
                # Open or increase long position
                order_size = abs_size
        else:
            if shares_now == 0 or accumulate:
                # Open or increase short position
                order_size = -abs_size
    elif is_exit:
        if direction == Direction.All:
            # Behaves like Direction.ShortOnly
            if accumulate:
                order_size = -abs_size
            else:
                if shares_now > 0:
                    # Reverse long position
                    if close_first:
                        # Close the long first; the short opens on a later signal
                        order_size = -abs_shares_now
                        size_type = SizeType.Shares
                    else:
                        if size_type == SizeType.Percent:
                            raise ValueError("SizeType.Percent does not support position reversal")
                        # Close the long and open the short in one order
                        order_size = -abs_shares_now - abs_size
                elif shares_now == 0:
                    # Open short position
                    order_size = -abs_size
                # shares_now < 0 without accumulation: already short -> order_size stays 0
        elif direction == Direction.ShortOnly:
            if shares_now < 0:
                if accumulate:
                    # Reduce short position
                    order_size = abs_size
                else:
                    # Close short position
                    order_size = abs_shares_now
                    size_type = SizeType.Shares
        else:
            if shares_now > 0:
                if accumulate:
                    # Reduce long position
                    order_size = -abs_size
                else:
                    # Close long position
                    order_size = -abs_shares_now
                    size_type = SizeType.Shares
    return order_size, size_type
@njit(cache=True)
def simulate_from_signals_nb(target_shape, group_lens, init_cash, call_seq, auto_call_seq,
                             entries, exits, size, size_type, direction, price, fees, fixed_fees,
                             slippage, min_size, max_size, reject_prob, allow_partial, raise_reject,
                             accumulate, log, conflict_mode, close_first, val_price, max_orders,
                             max_logs, flex_2d):
    """Adaptation of `simulate_nb` for simulation based on entry and exit signals.
    Utilizes flexible broadcasting.
    !!! note
        Should be only grouped if cash sharing is enabled."""
    check_group_lens(group_lens, target_shape[1])
    # Grouping implies cash sharing here (see note in the docstring)
    cash_sharing = is_grouped_nb(group_lens)
    check_group_init_cash(group_lens, target_shape[1], init_cash, cash_sharing)
    # Pre-allocate record arrays: numba cannot grow arrays dynamically
    order_records = np.empty(max_orders, dtype=order_dt)
    ridx = 0  # index of the next free order record
    if max_logs == 0:
        max_logs = 1  # keep at least one row so log_records[lidx] below is always addressable
    log_records = np.empty(max_logs, dtype=log_dt)
    lidx = 0  # index of the next free log record
    # Running state: cash per group (or per column) and shares per column
    last_cash = init_cash.astype(np.float_)
    last_shares = np.full(target_shape[1], 0., dtype=np.float_)
    # Per-column order sizes/types resolved from signals at each row
    order_size = np.empty(target_shape[1], dtype=np.float_)
    # NOTE(review): size types are stored as floats here; comparisons against the
    # integer SizeType enums still hold — confirm this is intentional
    order_size_type = np.empty(target_shape[1], dtype=np.float_)
    # Scratch buffer for per-column order values (used by auto call sequencing)
    temp_order_value = np.empty(target_shape[1], dtype=np.float_)
    from_col = 0
    for group in range(len(group_lens)):
        to_col = from_col + group_lens[group]
        group_len = to_col - from_col
        # Get running values per group
        if cash_sharing:
            cash_now = last_cash[group]
        for i in range(target_shape[0]):
            # Get size and value of each order
            for k in range(group_len):
                col = from_col + k  # order doesn't matter
                _order_size, _order_size_type = signals_get_size_nb(
                    last_shares[col],
                    flex_select_auto_nb(i, col, entries, flex_2d),
                    flex_select_auto_nb(i, col, exits, flex_2d),
                    flex_select_auto_nb(i, col, size, flex_2d),
                    flex_select_auto_nb(i, col, size_type, flex_2d),
                    flex_select_auto_nb(i, col, direction, flex_2d),
                    flex_select_auto_nb(i, col, accumulate, flex_2d),
                    flex_select_auto_nb(i, col, conflict_mode, flex_2d),
                    flex_select_auto_nb(i, col, close_first, flex_2d)
                )  # already takes into account direction
                order_size[col] = _order_size
                order_size_type[col] = _order_size_type
                if cash_sharing:
                    if _order_size == 0:
                        temp_order_value[k] = 0.
                    else:
                        _val_price = flex_select_auto_nb(i, col, val_price, flex_2d)
                        # Approximate order value
                        if _order_size_type == SizeType.Shares:
                            temp_order_value[k] = _order_size * _val_price
                        else:
                            # SizeType.Percent: fraction of cash when buying,
                            # fraction of the holding value when selling
                            if _order_size > 0:
                                temp_order_value[k] = _order_size * cash_now
                            else:
                                holding_value_now = last_shares[col] * _val_price
                                temp_order_value[k] = _order_size * abs(holding_value_now)
            if cash_sharing:
                # Dynamically sort by order value -> selling comes first to release funds early
                if auto_call_seq:
                    insert_argsort_nb(temp_order_value[:group_len], call_seq[i, from_col:to_col])
                # Same as get_group_value_ctx_nb but with flexible indexing
                value_now = cash_now
                for k in range(group_len):
                    col = from_col + k
                    if last_shares[col] != 0:
                        _val_price = flex_select_auto_nb(i, col, val_price, flex_2d)
                        value_now += last_shares[col] * _val_price
            for k in range(group_len):
                col = from_col + k
                if cash_sharing:
                    # Resolve the call sequence: call_seq[i, col] == call_seq[i, from_col + k]
                    col_i = call_seq[i, col]
                    if col_i >= group_len:
                        raise ValueError("Call index exceeds bounds of the group")
                    col = from_col + col_i
                # Get running values per column
                shares_now = last_shares[col]
                val_price_now = flex_select_auto_nb(i, col, val_price, flex_2d)
                if not cash_sharing:
                    cash_now = last_cash[col]
                    value_now = cash_now
                    if shares_now != 0:
                        value_now += shares_now * val_price_now
                # Generate the next order
                _order_size = order_size[col]  # already takes into account direction
                _order_size_type = order_size_type[col]
                if _order_size != 0:
                    if _order_size > 0:  # long order
                        _direction = flex_select_auto_nb(i, col, direction, flex_2d)
                        if _direction == Direction.ShortOnly:
                            _order_size *= -1  # must reverse for process_order_nb
                    else:  # short order
                        _direction = flex_select_auto_nb(i, col, direction, flex_2d)
                        if _direction == Direction.ShortOnly:
                            _order_size *= -1
                    order = create_order_nb(
                        size=_order_size,
                        size_type=_order_size_type,
                        direction=_direction,
                        price=flex_select_auto_nb(i, col, price, flex_2d),
                        fees=flex_select_auto_nb(i, col, fees, flex_2d),
                        fixed_fees=flex_select_auto_nb(i, col, fixed_fees, flex_2d),
                        slippage=flex_select_auto_nb(i, col, slippage, flex_2d),
                        min_size=flex_select_auto_nb(i, col, min_size, flex_2d),
                        max_size=flex_select_auto_nb(i, col, max_size, flex_2d),
                        reject_prob=flex_select_auto_nb(i, col, reject_prob, flex_2d),
                        allow_partial=flex_select_auto_nb(i, col, allow_partial, flex_2d),
                        raise_reject=flex_select_auto_nb(i, col, raise_reject, flex_2d),
                        log=flex_select_auto_nb(i, col, log, flex_2d)
                    )
                    # Process the order
                    if lidx > len(log_records) - 1:
                        raise IndexError("log_records index out of range")
                    cash_now, shares_now, order_result = process_order_nb(
                        cash_now, shares_now, val_price_now, value_now, order, log_records[lidx])
                    if order.log:
                        # Add log metadata
                        log_records[lidx]['id'] = lidx
                        log_records[lidx]['idx'] = i
                        log_records[lidx]['col'] = col
                        log_records[lidx]['group'] = group
                        if order_result.status == OrderStatus.Filled:
                            log_records[lidx]['order_id'] = ridx
                        else:
                            log_records[lidx]['order_id'] = -1
                        lidx += 1
                    if order_result.status == OrderStatus.Filled:
                        # Add order metadata
                        if ridx > len(order_records) - 1:
                            raise IndexError("order_records index out of range")
                        order_records[ridx]['id'] = ridx
                        order_records[ridx]['idx'] = i
                        order_records[ridx]['col'] = col
                        order_records[ridx]['size'] = order_result.size
                        order_records[ridx]['price'] = order_result.price
                        order_records[ridx]['fees'] = order_result.fees
                        order_records[ridx]['side'] = order_result.side
                        ridx += 1
                    # Now becomes last
                    if cash_sharing:
                        last_cash[group] = cash_now
                    else:
                        last_cash[col] = cash_now
                    last_shares[col] = shares_now
        from_col = to_col
    # Trim records down to the filled part
    return order_records[:ridx], log_records[:lidx]
# ############# Trades ############# #
@njit(cache=True)
def trade_duration_map_nb(record):
    """`map_func_nb` returning the duration of a trade, measured in rows
    between entry and exit."""
    entry_idx = record['entry_idx']
    exit_idx = record['exit_idx']
    return exit_idx - entry_idx
@njit(cache=True)
def get_trade_stats_nb(size, entry_price, entry_fees, exit_price, exit_fees, direction):
    """Compute trade statistics: absolute P&L and return relative to the entry value."""
    cost_basis = size * entry_price
    proceeds = size * exit_price
    # Gross value change; add_nb handles the subtraction in a numba-safe way
    gross_diff = add_nb(proceeds, -cost_basis)
    # A short trade profits when the value falls
    if direction == TradeDirection.Short and gross_diff != 0:
        gross_diff = -gross_diff
    pnl = gross_diff - entry_fees - exit_fees
    return pnl, pnl / cost_basis
size_zero_neg_err = "Found order with size 0 or less"
price_zero_neg_err = "Found order with price 0 or less"
@njit(cache=True)
def save_trade_nb(record, col,
entry_idx, entry_size_sum, entry_gross_sum, entry_fees_sum,
exit_idx, exit_size, exit_price, exit_fees,
direction, status, position_id):
"""Save trade to the record."""
# Size-weighted average of price
entry_price = entry_gross_sum / entry_size_sum
# Fraction of fees
size_fraction = exit_size / entry_size_sum
entry_fees = size_fraction * entry_fees_sum
# Get P&L and return
pnl, ret = get_trade_stats_nb(
exit_size,
entry_price,
entry_fees,
exit_price,
exit_fees,
direction
)
# Save trade
record['col'] = col
record['size'] = exit_size
record['entry_idx'] = entry_idx
record['entry_price'] = entry_price
record['entry_fees'] = entry_fees
record['exit_idx'] = exit_idx
record['exit_price'] = exit_price
record['exit_fees'] = exit_fees
record['pnl'] = pnl
record['return'] = ret
record['direction'] = direction
record['status'] = status
record['position_id'] = position_id
@njit(cache=True)
def orders_to_trades_nb(close, order_records, col_map):
"""Find trades and store their information as records to an array.
## Example
Simulate a strategy and find all trades in generated orders:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from vectorbt.records.nb import col_map_nb
>>> from vectorbt.portfolio.nb import (
... simulate_nb,
... create_order_nb,
... empty_prep_nb,
... orders_to_trades_nb
... )
>>> @njit
... def order_func_nb(oc, order_size, order_price):
... return create_order_nb(
... size=order_size[oc.i, oc.col],
... price=order_price[oc.i, oc.col],
... fees=0.01, slippage=0.01
... )
>>> order_size = np.asarray([
... [1, -1],
... [0.1, -0.1],
... [-1, 1],
... [-0.1, 0.1],
... [1, -1],
... [-2, 2]
... ])
>>> close = order_price = np.array([
... [1, 6],
... [2, 5],
... [3, 4],
... [4, 3],
... [5, 2],
... [6, 1]
... ])
>>> target_shape = order_size.shape
>>> group_lens = np.full(target_shape[1], 1)
>>> init_cash = np.full(target_shape[1], 100)
>>> cash_sharing = False
>>> call_seq = np.full(target_shape, 0)
>>> active_mask = np.full(target_shape, True)
>>> order_records, log_records = simulate_nb(
... target_shape, close, group_lens,
... init_cash, cash_sharing, call_seq, active_mask,
... empty_prep_nb, (),
... empty_prep_nb, (),
... empty_prep_nb, (),
... order_func_nb, (order_size, order_price),
... target_shape[0] * target_shape[1], 0
... )
>>> col_map = col_map_nb(order_records['col'], target_shape[1])
>>> trade_records = orders_to_trades_nb(close, order_records, col_map)
>>> print(pd.DataFrame.from_records(trade_records))
id col size entry_idx entry_price entry_fees exit_idx exit_price \\
0 0 0 1.0 0 1.101818 0.011018 2 2.97
1 1 0 0.1 0 1.101818 0.001102 3 3.96
2 2 0 1.0 4 5.050000 0.050500 5 5.94
3 3 0 1.0 5 5.940000 0.059400 5 6.00
4 4 1 1.0 0 5.850000 0.058500 2 4.04
5 5 1 0.1 0 5.850000 0.005850 3 3.03
6 6 1 1.0 4 1.980000 0.019800 5 1.01
7 7 1 1.0 5 1.010000 0.010100 5 1.00
exit_fees pnl return direction status position_id
0 0.02970 1.827464 1.658589 0 1 0
1 0.00396 0.280756 2.548119 0 1 0
2 0.05940 0.780100 0.154475 0 1 1
3 0.00000 -0.119400 -0.020101 1 0 2
4 0.04040 1.711100 0.292496 1 1 3
5 0.00303 0.273120 0.466872 1 1 3
6 0.01010 0.940100 0.474798 1 1 4
7 0.00000 -0.020100 -0.019901 0 0 5
```
"""
col_idxs, col_lens = col_map
col_start_idxs = np.cumsum(col_lens) - col_lens
records = np.empty(len(order_records), dtype=trade_dt)
ridx = 0
entry_size_sum = 0.
entry_gross_sum = 0.
entry_fees_sum = 0.
position_id = -1
for col in range(col_lens.shape[0]):
col_len = col_lens[col]
if col_len == 0:
continue
entry_idx = -1
direction = -1
last_id = -1
for i in range(col_len):
r = col_idxs[col_start_idxs[col] + i]
record = order_records[r]
if record['id'] < last_id:
raise ValueError("id must come in ascending order per column")
last_id = record['id']
i = record['idx']
order_size = record['size']
order_price = record['price']
order_fees = record['fees']
order_side = record['side']
if order_size <= 0.:
raise ValueError(size_zero_neg_err)
if order_price <= 0.:
raise ValueError(price_zero_neg_err)
if entry_idx == -1:
# Trade opened
entry_idx = i
if order_side == OrderSide.Buy:
direction = TradeDirection.Long
else:
direction = TradeDirection.Short
position_id += 1
# Reset running vars for a new position
entry_size_sum = 0.
entry_gross_sum = 0.
entry_fees_sum = 0.
if (direction == TradeDirection.Long and order_side == OrderSide.Buy) \
or (direction == TradeDirection.Short and order_side == OrderSide.Sell):
# Position increased
entry_size_sum += order_size
entry_gross_sum += order_size * order_price
entry_fees_sum += order_fees
elif (direction == TradeDirection.Long and order_side == OrderSide.Sell) \
or (direction == TradeDirection.Short and order_side == OrderSide.Buy):
if is_close_or_less_nb(order_size, entry_size_sum):
# Trade closed
if is_close_nb(order_size, entry_size_sum):
exit_size = entry_size_sum
else:
exit_size = order_size
exit_price = order_price
exit_fees = order_fees
exit_idx = i
save_trade_nb(
records[ridx],
col,
entry_idx,
entry_size_sum,
entry_gross_sum,
entry_fees_sum,
exit_idx,
exit_size,
exit_price,
exit_fees,
direction,
TradeStatus.Closed,
position_id
)
records[ridx]['id'] = ridx
ridx += 1
if is_close_nb(order_size, entry_size_sum):
# Position closed
entry_idx = -1
direction = -1
else:
# Position decreased, previous orders have now less impact
size_fraction = (entry_size_sum - order_size) / entry_size_sum
entry_size_sum *= size_fraction
entry_gross_sum *= size_fraction
entry_fees_sum *= size_fraction
else:
# Trade reversed
# Close current trade
cl_exit_size = entry_size_sum
cl_exit_price = order_price
cl_exit_fees = cl_exit_size / order_size * order_fees
cl_exit_idx = i
save_trade_nb(
records[ridx],
col,
entry_idx,
entry_size_sum,
entry_gross_sum,
entry_fees_sum,
cl_exit_idx,
cl_exit_size,
cl_exit_price,
cl_exit_fees,
direction,
TradeStatus.Closed,
position_id
)
records[ridx]['id'] = ridx
ridx += 1
# Open a new trade
entry_size_sum = order_size - cl_exit_size
entry_gross_sum = entry_size_sum * order_price
entry_fees_sum = order_fees - cl_exit_fees
entry_idx = i
if direction == TradeDirection.Long:
direction = TradeDirection.Short
else:
direction = TradeDirection.Long
position_id += 1
if entry_idx != -1 and is_less_nb(-entry_size_sum, 0):
# Trade in the previous column hasn't been closed
exit_size = entry_size_sum
exit_price = close[close.shape[0] - 1, col]
exit_fees = 0.
exit_idx = close.shape[0] - 1
save_trade_nb(
records[ridx],
col,
entry_idx,
entry_size_sum,
entry_gross_sum,
entry_fees_sum,
exit_idx,
exit_size,
exit_price,
exit_fees,
direction,
TradeStatus.Open,
position_id
)
records[ridx]['id'] = ridx
ridx += 1
return records[:ridx]
# ############# Positions ############# #
@njit(cache=True)
def save_position_nb(record, trade_records):
"""Save position to the record."""
# Aggregate trades
col = trade_records['col'][0]
size = np.sum(trade_records['size'])
entry_idx = trade_records['entry_idx'][0]
entry_price = np.sum(trade_records['size'] * trade_records['entry_price']) / size
entry_fees = np.sum(trade_records['entry_fees'])
exit_idx = trade_records['exit_idx'][-1]
exit_price = np.sum(trade_records['size'] * trade_records['exit_price']) / size
exit_fees = np.sum(trade_records['exit_fees'])
direction = trade_records['direction'][-1]
status = trade_records['status'][-1]
pnl, ret = get_trade_stats_nb(
size,
entry_price,
entry_fees,
exit_price,
exit_fees,
direction
)
# Save position
record['col'] = col
record['size'] = size
record['entry_idx'] = entry_idx
record['entry_price'] = entry_price
record['entry_fees'] = entry_fees
record['exit_idx'] = exit_idx
record['exit_price'] = exit_price
record['exit_fees'] = exit_fees
record['pnl'] = pnl
record['return'] = ret
record['direction'] = direction
record['status'] = status
@njit(cache=True)
def copy_trade_record_nb(position_record, trade_record):
# Save position
position_record['col'] = trade_record['col']
position_record['size'] = trade_record['size']
position_record['entry_idx'] = trade_record['entry_idx']
position_record['entry_price'] = trade_record['entry_price']
position_record['entry_fees'] = trade_record['entry_fees']
position_record['exit_idx'] = trade_record['exit_idx']
position_record['exit_price'] = trade_record['exit_price']
position_record['exit_fees'] = trade_record['exit_fees']
position_record['pnl'] = trade_record['pnl']
position_record['return'] = trade_record['return']
position_record['direction'] = trade_record['direction']
position_record['status'] = trade_record['status']
@njit(cache=True)
def trades_to_positions_nb(trade_records, col_map):
"""Find positions and store their information as records to an array.
## Example
Building upon the example in `orders_to_trades_nb`, convert trades to positions:
```python-repl
>>> from vectorbt.portfolio.nb import trades_to_positions_nb
>>> col_map = col_map_nb(trade_records['col'], target_shape[1])
>>> position_records = trades_to_positions_nb(trade_records, col_map)
>>> pd.DataFrame.from_records(position_records)
id col size entry_idx entry_price entry_fees exit_idx exit_price \\
0 0 0 1.1 0 1.101818 0.01212 3 3.060000
1 1 0 1.0 4 5.050000 0.05050 5 5.940000
2 2 0 1.0 5 5.940000 0.05940 5 6.000000
3 3 1 1.1 0 5.850000 0.06435 3 3.948182
4 4 1 1.0 4 1.980000 0.01980 5 1.010000
5 5 1 1.0 5 1.010000 0.01010 5 1.000000
exit_fees pnl return direction status
0 0.03366 2.10822 1.739455 0 1
1 0.05940 0.78010 0.154475 0 1
2 0.00000 -0.11940 -0.020101 1 0
3 0.04343 1.98422 0.308348 1 1
4 0.01010 0.94010 0.474798 1 1
5 0.00000 -0.02010 -0.019901 0 0
```
"""
col_idxs, col_lens = col_map
col_start_idxs = np.cumsum(col_lens) - col_lens
records = np.empty(len(trade_records), dtype=position_dt)
ridx = 0
from_r = -1
for col in range(col_lens.shape[0]):
col_len = col_lens[col]
if col_len == 0:
continue
last_id = -1
last_position_id = -1
for i in range(col_len):
r = col_idxs[col_start_idxs[col] + i]
record = trade_records[r]
if record['id'] < last_id:
raise ValueError("id must come in ascending order per column")
last_id = record['id']
position_id = record['position_id']
if position_id != last_position_id:
if last_position_id != -1:
if r - from_r > 1:
save_position_nb(records[ridx], trade_records[from_r:r])
else:
# Speed up
copy_trade_record_nb(records[ridx], trade_records[from_r])
records[ridx]['id'] = ridx
ridx += 1
from_r = r
last_position_id = position_id
if r - from_r > 0:
save_position_nb(records[ridx], trade_records[from_r:r + 1])
else:
# Speed up
copy_trade_record_nb(records[ridx], trade_records[from_r])
records[ridx]['id'] = ridx
ridx += 1
return records[:ridx]
# ############# Shares ############# #
@njit(cache=True)
def get_long_size_nb(shares_now, new_shares_now):
"""Get long size."""
if shares_now <= 0 and new_shares_now <= 0:
return 0.
if shares_now >= 0 and new_shares_now < 0:
return -shares_now
if shares_now < 0 and new_shares_now >= 0:
return new_shares_now
return add_nb(new_shares_now, -shares_now)
@njit(cache=True)
def get_short_size_nb(shares_now, new_shares_now):
"""Get short size."""
if shares_now >= 0 and new_shares_now >= 0:
return 0.
if shares_now >= 0 and new_shares_now < 0:
return -new_shares_now
if shares_now < 0 and new_shares_now >= 0:
return shares_now
return add_nb(shares_now, -new_shares_now)
@njit(cache=True)
def share_flow_nb(target_shape, order_records, col_map, direction):
"""Get share flow series per column. Has opposite sign."""
col_idxs, col_lens = col_map
col_start_idxs = np.cumsum(col_lens) - col_lens
out = np.full(target_shape, 0., dtype=np.float_)
for col in range(col_lens.shape[0]):
col_len = col_lens[col]
if col_len == 0:
continue
last_id = -1
shares_now = 0.
for i in range(col_len):
r = col_idxs[col_start_idxs[col] + i]
record = order_records[r]
if record['id'] < last_id:
raise ValueError("id must come in ascending order per column")
last_id = record['id']
i = record['idx']
side = record['side']
size = record['size']
if side == OrderSide.Sell:
size *= -1
new_shares_now = add_nb(shares_now, size)
if direction == Direction.LongOnly:
out[i, col] += get_long_size_nb(shares_now, new_shares_now)
elif direction == Direction.ShortOnly:
out[i, col] += get_short_size_nb(shares_now, new_shares_now)
else:
out[i, col] += size
shares_now = new_shares_now
return out
@njit(cache=True)
def shares_nb(share_flow):
"""Get share series per column."""
out = np.empty_like(share_flow)
for col in range(share_flow.shape[1]):
shares_now = 0.
for i in range(share_flow.shape[0]):
flow_value = share_flow[i, col]
shares_now = add_nb(shares_now, flow_value)
out[i, col] = shares_now
return out
@njit(cache=True)
def i_group_any_reduce_nb(i, group, a):
"""Boolean "any" reducer for grouped columns."""
return np.any(a)
@njit
def pos_mask_grouped_nb(pos_mask, group_lens):
"""Get number of columns in position for each row and group."""
return generic_nb.squeeze_grouped_nb(pos_mask, group_lens, i_group_any_reduce_nb).astype(np.bool_)
@njit(cache=True)
def group_mean_reduce_nb(group, a):
"""Mean reducer for grouped columns."""
return np.mean(a)
@njit
def pos_coverage_grouped_nb(pos_mask, group_lens):
"""Get coverage of position for each row and group."""
return generic_nb.reduce_grouped_nb(pos_mask, group_lens, group_mean_reduce_nb)
# ############# Cash ############# #
@njit(cache=True)
def cash_flow_nb(target_shape, order_records, col_map, short_cash):
"""Get cash flow series per column."""
col_idxs, col_lens = col_map
col_start_idxs = np.cumsum(col_lens) - col_lens
out = np.full(target_shape, 0., dtype=np.float_)
for col in range(col_lens.shape[0]):
col_len = col_lens[col]
if col_len == 0:
continue
last_id = -1
shares_now = 0.
debt_now = 0.
for i in range(col_len):
r = col_idxs[col_start_idxs[col] + i]
record = order_records[r]
if record['id'] < last_id:
raise ValueError("id must come in ascending order per column")
last_id = record['id']
i = record['idx']
side = record['side']
size = record['size']
price = record['price']
fees = record['fees']
volume = size * price
if side == OrderSide.Sell:
size *= -1
new_shares_now = add_nb(shares_now, size)
shorted_size = get_short_size_nb(shares_now, new_shares_now)
if not short_cash and shorted_size != 0:
if shorted_size > 0:
debt_now += shorted_size * price
out[i, col] += add_nb(volume, -2 * shorted_size * price)
else:
if is_close_nb(volume, debt_now):
volume = debt_now
if volume >= debt_now:
out[i, col] += add_nb(2 * debt_now, -volume)
debt_now = 0.
else:
out[i, col] += volume
debt_now -= volume
else:
if side == OrderSide.Buy:
out[i, col] -= volume
else:
out[i, col] += volume
out[i, col] -= fees
shares_now = new_shares_now
return out
@njit(cache=True)
def cash_flow_grouped_nb(cash_flow, group_lens):
"""Get cash flow series per group."""
check_group_lens(group_lens, cash_flow.shape[1])
out = np.empty((cash_flow.shape[0], len(group_lens)), dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
out[:, group] = np.sum(cash_flow[:, from_col:to_col], axis=1)
from_col = to_col
return out
@njit(cache=True)
def init_cash_grouped_nb(init_cash, group_lens, cash_sharing):
"""Get initial cash per group."""
if cash_sharing:
return init_cash
out = np.empty(group_lens.shape, dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
cash_sum = 0.
for col in range(from_col, to_col):
cash_sum += init_cash[col]
out[group] = cash_sum
from_col = to_col
return out
@njit(cache=True)
def init_cash_nb(init_cash, group_lens, cash_sharing):
"""Get initial cash per column."""
if not cash_sharing:
return init_cash
group_lens_cs = np.cumsum(group_lens)
out = np.full(group_lens_cs[-1], np.nan, dtype=np.float_)
out[group_lens_cs - group_lens] = init_cash
out = generic_nb.ffill_1d_nb(out)
return out
@njit(cache=True)
def cash_nb(cash_flow, group_lens, init_cash):
"""Get cash series per column."""
check_group_lens(group_lens, cash_flow.shape[1])
out = np.empty_like(cash_flow)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
for i in range(cash_flow.shape[0]):
for col in range(from_col, to_col):
cash_now = init_cash[col] if i == 0 else out[i - 1, col]
out[i, col] = add_nb(cash_now, cash_flow[i, col])
from_col = to_col
return out
@njit(cache=True)
def cash_in_sim_order_nb(cash_flow, group_lens, init_cash_grouped, call_seq):
"""Get cash series in simulation order."""
check_group_lens(group_lens, cash_flow.shape[1])
out = np.empty_like(cash_flow)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
group_len = to_col - from_col
cash_now = init_cash_grouped[group]
for i in range(cash_flow.shape[0]):
for k in range(group_len):
col = from_col + call_seq[i, from_col + k]
cash_now = add_nb(cash_now, cash_flow[i, col])
out[i, col] = cash_now
from_col = to_col
return out
@njit(cache=True)
def cash_grouped_nb(target_shape, cash_flow_grouped, group_lens, init_cash_grouped):
"""Get cash series per group."""
check_group_lens(group_lens, target_shape[1])
out = np.empty_like(cash_flow_grouped)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
cash_now = init_cash_grouped[group]
for i in range(cash_flow_grouped.shape[0]):
flow_value = cash_flow_grouped[i, group]
cash_now = add_nb(cash_now, flow_value)
out[i, group] = cash_now
from_col = to_col
return out
# ############# Performance ############# #
@njit(cache=True)
def holding_value_nb(close, shares):
"""Get holding value series per column."""
return close * shares
@njit(cache=True)
def holding_value_grouped_nb(holding_value, group_lens):
"""Get holding value series per group."""
check_group_lens(group_lens, holding_value.shape[1])
out = np.empty((holding_value.shape[0], len(group_lens)), dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
out[:, group] = np.sum(holding_value[:, from_col:to_col], axis=1)
from_col = to_col
return out
@njit(cache=True)
def value_in_sim_order_nb(cash, holding_value, group_lens, call_seq):
"""Get portfolio value series in simulation order."""
check_group_lens(group_lens, cash.shape[1])
out = np.empty_like(cash)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
group_len = to_col - from_col
holding_value_now = 0.
# Without correctly treating NaN values, after one NaN all will be NaN
since_last_nan = group_len
for j in range(cash.shape[0] * group_len):
i = j // group_len
col = from_col + call_seq[i, from_col + j % group_len]
if j >= group_len:
last_j = j - group_len
last_i = last_j // group_len
last_col = from_col + call_seq[last_i, from_col + last_j % group_len]
if not np.isnan(holding_value[last_i, last_col]):
holding_value_now -= holding_value[last_i, last_col]
if np.isnan(holding_value[i, col]):
since_last_nan = 0
else:
holding_value_now += holding_value[i, col]
if since_last_nan < group_len:
out[i, col] = np.nan
else:
out[i, col] = cash[i, col] + holding_value_now
since_last_nan += 1
from_col = to_col
return out
@njit(cache=True)
def value_nb(cash, holding_value):
"""Get portfolio value series per column/group."""
return cash + holding_value
@njit(cache=True)
def total_profit_nb(target_shape, close, order_records, col_map):
"""Get total profit per column.
A much faster version than the one based on `value_nb`."""
col_idxs, col_lens = col_map
col_start_idxs = np.cumsum(col_lens) - col_lens
shares = np.full(target_shape[1], 0., dtype=np.float_)
cash = np.full(target_shape[1], 0., dtype=np.float_)
zero_mask = np.full(target_shape[1], False, dtype=np.bool_)
for col in range(col_lens.shape[0]):
col_len = col_lens[col]
if col_len == 0:
zero_mask[col] = True
continue
last_id = -1
for i in range(col_len):
r = col_idxs[col_start_idxs[col] + i]
record = order_records[r]
if record['id'] < last_id:
raise ValueError("id must come in ascending order per column")
last_id = record['id']
# Fill shares
if record['side'] == OrderSide.Buy:
order_size = record['size']
shares[col] = add_nb(shares[col], order_size)
else:
order_size = record['size']
shares[col] = add_nb(shares[col], -order_size)
# Fill cash
if record['side'] == OrderSide.Buy:
order_cash = record['size'] * record['price'] + record['fees']
cash[col] = add_nb(cash[col], -order_cash)
else:
order_cash = record['size'] * record['price'] - record['fees']
cash[col] = add_nb(cash[col], order_cash)
total_profit = cash + shares * close[-1, :]
total_profit[zero_mask] = 0.
return total_profit
@njit(cache=True)
def total_profit_grouped_nb(total_profit, group_lens):
"""Get total profit per group."""
check_group_lens(group_lens, total_profit.shape[0])
out = np.empty((len(group_lens),), dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
out[group] = np.sum(total_profit[from_col:to_col])
from_col = to_col
return out
@njit(cache=True)
def final_value_nb(total_profit, init_cash):
"""Get total profit per column/group."""
return total_profit + init_cash
@njit(cache=True)
def total_return_nb(total_profit, init_cash):
"""Get total return per column/group."""
return total_profit / init_cash
@njit(cache=True)
def get_return_nb(input_value, output_value):
"""Get return from input and output value."""
if input_value == 0:
if output_value == 0:
return 0.
return np.inf * np.sign(output_value)
return_value = (output_value - input_value) / input_value
if input_value < 0:
return_value *= -1
return return_value
@njit(cache=True)
def returns_nb(value, init_cash):
"""Get portfolio return series per column/group."""
out = np.empty(value.shape, dtype=np.float_)
for col in range(out.shape[1]):
input_value = init_cash[col]
for i in range(out.shape[0]):
output_value = value[i, col]
out[i, col] = get_return_nb(input_value, output_value)
input_value = output_value
return out
@njit(cache=True)
def returns_in_sim_order_nb(value_iso, group_lens, init_cash_grouped, call_seq):
"""Get portfolio return series in simulation order."""
check_group_lens(group_lens, value_iso.shape[1])
out = np.empty_like(value_iso)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
group_len = to_col - from_col
input_value = init_cash_grouped[group]
for j in range(value_iso.shape[0] * group_len):
i = j // group_len
col = from_col + call_seq[i, from_col + j % group_len]
output_value = value_iso[i, col]
out[i, col] = get_return_nb(input_value, output_value)
input_value = output_value
from_col = to_col
return out
@njit(cache=True)
def active_returns_nb(cash_flow, holding_value):
"""Get active return series per column/group."""
out = np.empty_like(cash_flow)
for col in range(cash_flow.shape[1]):
for i in range(cash_flow.shape[0]):
input_value = 0. if i == 0 else holding_value[i - 1, col]
output_value = holding_value[i, col] + cash_flow[i, col]
out[i, col] = get_return_nb(input_value, output_value)
return out
@njit(cache=True)
def market_value_nb(close, init_cash):
"""Get market value per column."""
return close / close[0] * init_cash
@njit(cache=True)
def market_value_grouped_nb(close, group_lens, init_cash_grouped):
"""Get market value per group."""
check_group_lens(group_lens, close.shape[1])
out = np.empty((close.shape[0], len(group_lens)), dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
group_len = to_col - from_col
col_init_cash = init_cash_grouped[group] / group_len
close_norm = close[:, from_col:to_col] / close[0, from_col:to_col]
out[:, group] = col_init_cash * np.sum(close_norm, axis=1)
from_col = to_col
return out
@njit(cache=True)
def total_market_return_nb(market_value):
"""Get total market return per column/group."""
out = np.empty((market_value.shape[1],), dtype=np.float_)
for col in range(market_value.shape[1]):
out[col] = get_return_nb(market_value[0, col], market_value[-1, col])
return out
@njit(cache=True)
def gross_exposure_nb(holding_value, cash):
"""Get gross exposure per column/group."""
out = np.empty(holding_value.shape, dtype=np.float_)
for col in range(out.shape[1]):
for i in range(out.shape[0]):
denom = add_nb(holding_value[i, col], cash[i, col])
if denom == 0:
out[i, col] = 0.
else:
out[i, col] = holding_value[i, col] / denom
return out
| 38.947138 | 128 | 0.568313 |
7954a11e07dbae3e6a24c392e374aafaee409b6d | 3,298 | py | Python | objective_functions.py | aurelienserre/samestats | 91a4471e8e85cff198b206ec9a36883811215596 | [
"BSD-3-Clause"
] | null | null | null | objective_functions.py | aurelienserre/samestats | 91a4471e8e85cff198b206ec9a36883811215596 | [
"BSD-3-Clause"
] | null | null | null | objective_functions.py | aurelienserre/samestats | 91a4471e8e85cff198b206ec9a36883811215596 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
class DistToPoints(nn.Module):
"""Compute the sum over all the points given as input of
their squared distances to some target points."""
def __init__(self, targets):
"""Initializes the targets
:targets: (np.array or torch.Tensor) target points
dimensions: (n_targets, n_coords_of_points)
"""
super(DistToPoints, self).__init__()
self.targets = torch.Tensor(targets)
def forward(self, points):
"""Computes the sum over all points of the squared distance to the
closest target"""
points = torch.unsqueeze(points, dim=1) # (n_points, 1, n_coords)
sq_diff = (self.targets - points) ** 2 # (n_points, n_targets, n_coords)
dist_mat = torch.sqrt((sq_diff).sum(dim=2)) # (n_points, n_targets)
dist_to_closest = torch.min(dist_mat, dim=1).values # (n_points,)
sum_of_dists = dist_to_closest.sum() # scalar
return sum_of_dists
class DistToLines2D(nn.Module):
"""Compute the sum over all the points given as input of
their squared distances to some target lines. Only for 2D datasets"""
def __init__(self, lines):
"""Initializes the target lines
:lines: (np.array or torch.Tensor) target lines defines by two points
dimensions: (n_targets, 2, n_coords=2)
"""
super(DistToLines2D, self).__init__()
self.lines = torch.Tensor(lines)
def forward(self, points):
"""Computes the sum over all points of the squared distance to the
closest line"""
# let M a point from the dataset, and (P1, P2) the two points defining a target line.
# P1P2 = P2 - P1
P1P2 = self.lines[:, 1, :] - self.lines[:, 0, :] # (n_targets, 2)
# norm of P1P2
seg_norm = torch.sqrt((P1P2 ** 2).sum(dim=1)) # (n_targets,)
# P1M = M - P1, P2M = M - P2
P1M = points[:, None, :] - self.lines[:, 0, :] # (n_points, n_targets, 2)
P2M = points[:, None, :] - self.lines[:, 1, :] # (n_points, n_targets, 2)
# dot product P1M . P1P2
dot_prod = torch.matmul(P1P2[:, None, :],
P1M[:, :, :, None]).squeeze() # (n_points, n_targets)
# shortest distance from M to P1 or P2
dist_closest = torch.min(torch.sqrt((P1M ** 2).sum(dim=-1)),
torch.sqrt((P2M ** 2).sum(dim=-1))) # (n_points, n_targets)
# projection of M on (P1P2)
H = self.lines[:, 0, :] \
+ (dot_prod / (seg_norm ** 2)).unsqueeze(dim=-1) * P1P2 # (n_points, n_targets, 2)
# distance from M to its projection H
MH = H - points.unsqueeze(dim=1) # (n_points, n_targets, 2)
dist_proj = torch.sqrt((MH ** 2).sum(dim=-1)) # (n_points, n_targets)
# dist from M to segment P1P2 = dist_proj if H falls on the segment
# P1P2, or dist_closest otherwise
dist = torch.where((0 < dot_prod) & (dot_prod < (seg_norm) ** 2),
dist_proj, dist_closest) # (n_points, n_targets)
dist_to_closest = torch.min(dist, dim=1).values # (n_points,)
sum_of_dist = dist_to_closest.sum() # scalar
return sum_of_dist
| 40.219512 | 96 | 0.579442 |
7954a1ff53f499c2aebac3c62ba1eacbfe522892 | 1,723 | py | Python | test/test_copyright.py | livanov93/launch_param_builder | 09855c93d959b5de3f273ba3b146f76275f31588 | [
"BSD-3-Clause"
] | 1 | 2022-01-20T23:43:25.000Z | 2022-01-20T23:43:25.000Z | test/test_copyright.py | livanov93/launch_param_builder | 09855c93d959b5de3f273ba3b146f76275f31588 | [
"BSD-3-Clause"
] | 5 | 2021-08-11T23:31:27.000Z | 2022-01-20T23:53:13.000Z | test/test_copyright.py | livanov93/launch_param_builder | 09855c93d959b5de3f273ba3b146f76275f31588 | [
"BSD-3-Clause"
] | 3 | 2021-08-12T18:42:51.000Z | 2022-01-07T16:58:43.000Z | # Copyright 2021 PickNik Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the PickNik Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ament_copyright.main import main
import pytest
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
rc = main(argv=[".", "test"])
assert rc == 0, "Found errors"
| 44.179487 | 77 | 0.763784 |
7954a311fb1626a4fe5e15d6ac1727f190b0ddae | 29,970 | py | Python | scripts/processors/Tokenisers.py | arnacadia/Oscar | 469b26495d46bcca604884d9b0347a29ad3bd572 | [
"Apache-2.0"
] | null | null | null | scripts/processors/Tokenisers.py | arnacadia/Oscar | 469b26495d46bcca604884d9b0347a29ad3bd572 | [
"Apache-2.0"
] | null | null | null | scripts/processors/Tokenisers.py | arnacadia/Oscar | 469b26495d46bcca604884d9b0347a29ad3bd572 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project: Simple4All - January 2013 - www.simple4all.org
## Contact: Oliver Watts - owatts@staffmail.ed.ac.uk
## Contact: Antti Suni - Antti.Suni@helsinki.fi
#from naive.naive_util import *
import unicodedata
import glob
from processors.UtteranceProcessor import SUtteranceProcessor, Element
# from processors.NodeSplitter import *
# from processors.NodeEnricher import *
import datetime
from naive import naive_util
from tokenizer import tokenize, TOK
try:
import regex as new_regex
except ImportError:
sys.exit('Please install "regex": https://pypi.python.org/pypi/regex ')
import default.const as c
class RegexTokeniser(SUtteranceProcessor):
    '''
    A very crude tokeniser, which:

    1. splits the split_attribute of each target node with a regular
       expression (default: whitespace). Whether the separators themselves
       become tokens depends on the use of capturing brackets in the regex
       -- cf. (\s+) and \s+
    2. optionally pads the token sequence with terminal tokens
    3. optionally classifies tokens on the basis of regexes
    4. optionally adds an ASCII-safe ("safetext") representation per token
    '''
    def __init__(self, processor_name='regex_tokeniser', target_nodes = '//utt', split_attribute = 'text', \
                    child_node_type = 'token', add_terminal_tokens=True, split_pattern='\s+', \
                    add_token_classes = True, \
                    class_patterns = [('space', '\A\s+\Z'), ('punctuation', '\A[\.\,\;\!\?\s]+\Z')], \
                    default_class = 'word', class_attribute='token_class',
                    add_safetext = True,
                    safetext_attribute = 'safetext',
                    lowercase_safetext = True):
        ## NB: the class_patterns default list is shared between instances but
        ## is only ever read (compiled into a fresh list below), so the usual
        ## mutable-default pitfall does not bite here.
        self.processor_name = processor_name
        self.split_pattern = split_pattern
        self.target_nodes = target_nodes
        self.split_attribute = split_attribute
        self.child_node_type = child_node_type
        self.add_terminal_tokens = add_terminal_tokens
        ## Pre-compile the classification patterns once, preserving their order
        ## (first match wins in classify_token):
        self.class_patterns = [(name, new_regex.compile(patt)) for (name, patt) in class_patterns]
        self.default_class = default_class
        self.class_attribute = class_attribute
        self.add_token_classes = add_token_classes
        self.add_safetext = add_safetext
        self.safetext_attribute = safetext_attribute
        self.lowercase_safetext = lowercase_safetext
        self.regex = new_regex.compile(self.split_pattern)
        super(RegexTokeniser, self).__init__()

    def process_utterance(self, utt):
        '''Split the split_attribute of each matching node and attach one child
        node per token, optionally carrying class and safetext attributes.'''
        for node in utt.xpath(self.target_nodes):
            assert node.has_attribute(self.split_attribute)
            to_split = node.get(self.split_attribute)
            child_chunks = self.splitting_function(to_split)
            for chunk in child_chunks:
                child = Element(self.child_node_type)
                child.set(self.split_attribute, chunk)
                if self.add_token_classes:
                    child.set(self.class_attribute, self.classify_token(chunk))
                if self.add_safetext:
                    child.set(self.safetext_attribute, self.safetext_token(chunk))
                node.add_child(child)

    def classify_token(self, token):
        '''Return the name of the first class whose pattern matches token,
        or default_class if none match.'''
        ## Special handling of terminal token:
        if token == c.TERMINAL:
            return c.TERMINAL
        for (item_class, pattern) in self.class_patterns:
            if pattern.match(token):
                return item_class
        ## If we have got this far, none of the patterns matched -- return default:
        return self.default_class

    def safetext_token(self, instring):
        '''Return an ASCII-safe version of instring (lowercased if configured).'''
        ## Special handling of terminal token:
        if instring == c.TERMINAL:
            return c.TERMINAL
        ## BUGFIX: this option may be configured either as a bool or (from a
        ## text config) as the string 'True'. The original test (== 'True')
        ## never matched the boolean default True, silently disabling
        ## lowercasing; accept both forms.
        if self.lowercase_safetext in (True, 'True'):
            return naive_util.safetext(instring.lower())
        else:
            return naive_util.safetext(instring)

    def splitting_function(self, instring):
        '''Split instring with the configured regex, dropping empty strings and
        optionally adding terminal tokens at both ends.'''
        tokens = self.regex.split(instring)
        tokens = [t for t in tokens if t != '']
        if self.add_terminal_tokens:
            tokens = [c.TERMINAL] + tokens + [c.TERMINAL]
        return tokens

    def do_training(self, speech_corpus, text_corpus):
        ## Nothing to learn from data for this processor.
        ## (Parenthesised print works identically under Python 2 and 3.)
        print("RegexTokeniser requires no training")
class IcelandicTokeniser(SUtteranceProcessor):
    '''
    Tokeniser for Icelandic text which:

    1. splits the split_attribute of each target node with the `tokenizer`
       package's tokenize() (then re-splits each token's text on whitespace)
    2. optionally pads the token sequence with terminal tokens
    3. optionally classifies tokens on the basis of regexes
    4. optionally adds an ASCII-safe ("safetext") representation per token
    '''
    def __init__(self, processor_name='icelandic_tokeniser', target_nodes = '//utt', split_attribute = 'text', \
                    child_node_type = 'token', add_terminal_tokens=True, \
                    add_token_classes = True, \
                    default_class = 'word', class_attribute='token_class',
                    add_safetext = True,
                    safetext_attribute = 'safetext', \
                    class_patterns = [('space', '\A\s+\Z'), ('punctuation', '\A[\.\,\;\!\?\s]+\Z')],
                    lowercase_safetext = True):
        self.processor_name = processor_name
        self.target_nodes = target_nodes
        self.split_attribute = split_attribute
        self.child_node_type = child_node_type
        self.add_terminal_tokens = add_terminal_tokens
        self.default_class = default_class
        self.class_attribute = class_attribute
        self.add_token_classes = add_token_classes
        ## Pre-compile the classification patterns once (first match wins):
        self.class_patterns = [(name, new_regex.compile(patt)) for (name, patt) in class_patterns]
        self.add_safetext = add_safetext
        self.safetext_attribute = safetext_attribute
        self.lowercase_safetext = lowercase_safetext
        super(IcelandicTokeniser, self).__init__()

    def process_utterance(self, utt):
        '''Split the split_attribute of each matching node and attach one child
        node per token, optionally carrying class and safetext attributes.'''
        for node in utt.xpath(self.target_nodes):
            assert node.has_attribute(self.split_attribute)
            to_split = node.get(self.split_attribute)
            child_chunks = self.splitting_function(to_split)
            for chunk in child_chunks:
                child = Element(self.child_node_type)
                child.set(self.split_attribute, chunk)
                if self.add_token_classes:
                    child.set(self.class_attribute, self.classify_token(chunk))
                if self.add_safetext:
                    child.set(self.safetext_attribute, self.safetext_token(chunk))
                node.add_child(child)

    def classify_token(self, token):
        '''Return the name of the first class whose pattern matches token,
        or default_class if none match.'''
        ## Special handling of terminal token:
        if token == c.TERMINAL:
            return c.TERMINAL
        for (item_class, pattern) in self.class_patterns:
            if pattern.match(token):
                return item_class
        return self.default_class

    def safetext_token(self, instring):
        '''Return an ASCII-safe version of instring (lowercased if configured).'''
        ## Special handling of terminal token:
        if instring == c.TERMINAL:
            return c.TERMINAL
        ## BUGFIX: this option may be configured either as a bool or (from a
        ## text config) as the string 'True'. The original test (== 'True')
        ## never matched the boolean default True, silently disabling
        ## lowercasing; accept both forms.
        if self.lowercase_safetext in (True, 'True'):
            return naive_util.safetext(instring.lower())
        else:
            return naive_util.safetext(instring)

    def splitting_function(self, instring):
        '''Tokenise with the Icelandic `tokenizer` package, keep non-empty
        token texts, re-split them on whitespace, and optionally add terminal
        tokens at both ends.'''
        tokens = tokenize(instring)
        tokens = [w for t in tokens if t.txt is not None and t.txt != '' for w in t.txt.split()]
        if self.add_terminal_tokens:
            tokens = [c.TERMINAL] + tokens + [c.TERMINAL]
        return tokens

    def do_training(self, speech_corpus, text_corpus):
        ## Nothing to learn from data for this processor.
        ## (Parenthesised print works identically under Python 2 and 3.)
        print("IcelandicTokeniser requires no training")
'''
http://www.fileformat.info/info/unicode/category/index.htm:
Code Description
[Cc] Other, Control
[Cf] Other, Format
[Cn] Other, Not Assigned (no characters in the file have this property)
[Co] Other, Private Use
[Cs] Other, Surrogate
[LC] Letter, Cased
[Ll] Letter, Lowercase
[Lm] Letter, Modifier
[Lo] Letter, Other
[Lt] Letter, Titlecase
[Lu] Letter, Uppercase
[Mc] Mark, Spacing Combining
[Me] Mark, Enclosing
[Mn] Mark, Nonspacing
[Nd] Number, Decimal Digit
[Nl] Number, Letter
[No] Number, Other
[Pc] Punctuation, Connector
[Pd] Punctuation, Dash
[Pe] Punctuation, Close
[Pf] Punctuation, Final quote (may behave like Ps or Pe depending on usage)
[Pi] Punctuation, Initial quote (may behave like Ps or Pe depending on usage)
[Po] Punctuation, Other
[Ps] Punctuation, Open
[Sc] Symbol, Currency
[Sk] Symbol, Modifier
[Sm] Symbol, Math
[So] Symbol, Other
[Zl] Separator, Line
[Zp] Separator, Paragraph
[Zs] Separator, Space
'''
# class SafeTextMaker(NodeEnricher):
# '''Lowercase, convert to ascii-safe strings, but handle terminal token specially'''
# def load(self):
# NodeEnricher.load(self)
# self.lowercase = self.config.get('lowercase', 'True') ## string not bool
# class RegexClassifier(NodeEnricher):
# '''Classifies nodes based on comparing their input_attribute against a sequence
# of classes and associated regular expressions. The sequence is specified in
# a config subsection. The sequence is iterated through, when a pattern is matched
# the search stops and the class corresponding to the matched pattern is assigned to
# the node under output_attribute. If none are matched, default_class is assigned.'''
# def load(self):
# NodeEnricher.load(self)
# if 'classes' not in self.config:
# sys.exit('Please specify classes for RegexClassifier')
# self.classes = self.config['classes']
# if 'default_class' not in self.config:
# sys.exit('Please specify default_class for RegexClassifier')
# self.default_class = self.config.get('default_class')
# # token_classes = config_list(self.config.get('token_classes', ['space','not_space']))
# # token_class_patterns = config_list(self.config.get('token_class_patterns', ['\s+']))
# # print token_classes
# # print token_class_patterns
# # assert len(token_classes) == (len(token_class_patterns) + 1),'One more class must be \
# # given than patterns, as the default case'
# ## Compile on load, adding string-end symbols:
# self.class_patterns = [(name, new_regex.compile('\A%s\Z'%(string))) \
# for (name, string) in self.classes.items()]
# # def enriching_function(self, instring):
# def do_training(self, speech_corpus, text_corpus):
# print "RegexTokeniser requires no training"
# def do_training(self, speech_corpus, text_corpus):
# print "RegexTokeniser requires no training"
#
# class CharClassTokenClassifier(NodeEnricher):
# '''Classifies token based on list of classes and associated regular expresssions.'''
# def load(self):
# token_classes = config_list(self.config.get('token_classes', ['space','not_space']))
# token_class_patterns = config_list(self.config.get('token_class_patterns', ['\s+']))
# print token_classes
# print token_class_patterns
# assert len(token_classes) == (len(token_class_patterns) + 1),'One more class must be \
# given than patterns, as the default case'
# ## Split classes into ones with patterns and default:
# self.token_classes = token_classes[:-1]
# self.default_token_class = token_classes[-1]
# ## Compile on load, adding string-end symbols:
# self.token_class_patterns = [re.compile('\A%s\Z'%(string)) for string in token_class_patterns]
#
# def classify(self, instring):
# ## Special handling of utterance end token:
# if instring == c.UTTEND:
# return c.UTTEND
# for (token_class, pattern) in zip(self.token_classes, self.token_class_patterns):
# if re.match(pattern, instring):
# return token_class
# ## if we have got this far, none of patterns matched -- return default:
# return self.default_token_class
#
# def train(self):
# print "RegexTokeniser requires no training"
#
#
#
# class CharClassTokeniser(NodeSplitter):
# '''
# Simple tokeniser which relies on characer classes, which can be specified using Unicode
# character properties, to split text into tokens.
#
# Depend on https://pypi.python.org/pypi/regex, a "new regex implementation [which] is
# intended eventually to replace Python's current re module implementation"
# '''
#
# def load(self):
#
# NodeSplitter.load(self)
# self.split_pattern = self.config.get('split_pattern', '\s+')
#
#
#
#
#
# if 'character_classes' not in self.config:
# sys.exit("List of character classes must be specified for CharClassTokeniser")
# self.character_classes = self.config['character_classes']
# for (name, pattern) in self.character_classes.items():
# self.split_pattern = self.split_pattern.replace(name, pattern)
#
# self.regex = new_regex.compile(self.split_pattern, new_regex.UNICODE)
#
# def splitting_function(self, instring):
# tokens = new_regex.split(self.regex, instring)
# tokens = [t for t in tokens if t != '']
# if self.add_terminal_tokens:
# tokens = [c.UTTEND] + tokens + [c.UTTEND]
# return tokens
#
# def do_training(self, speech_corpus, text_corpus):
# print "CharClassTokeniser requires no training"
#
#
# class TweakedCharPropTokeniser(NodeSplitter):
# '''
# As CharPropTokeniser , but allow user to modify char classes by editing a character table
# '''
# # def __init__(self, table_file, character_class_precedence, \
# # character_class_patterns, tokeniser_split_pattern):
# #
# # self.table = {} ## This table starts empty but can be populated by training
# #
# # self.table_file = table_file
# # self.character_class_precedence = character_class_precedence
# # self.character_class_patterns = character_class_patterns
# # self.tokeniser_split_pattern = tokeniser_split_pattern
# #
# # assert len(self.character_class_precedence) == len(self.character_class_patterns)
# # self.character_classes = dict(zip(self.character_class_precedence, self.character_class_patterns))
# #
# # self.unicode_category_map = {} ## will map: coarse categories->regex
# # self.populate_unicode_category_map()
# #
# # # table data has already been collected, load it from file:
# # if os.path.isfile(table_file):
# # self.populate_table_from_file(table_file)
# #
# # if self.table != {}:
# # self.compile_regex()
# ## OSW TODO: similar to function in LookupTable -- combine them?
# ## Differences: here, get header from first line
# def populate_table_from_file(self, fname):
# data = readlist(fname)
# data = [re.split("\t", line) for line in data]
# if data == []:
# print "warning: no data loaded from table"
# pass
# else:
# self.header_line = data[0]
# header = self.header_line[1:]
# for line in data[1:]:
# assert len(line) == len(header)+1
# lemma = line[0]
# ## handle tab replacements:
# if "\\t" in lemma:
# lemma = lemma.replace("\\t", "\t")
# self.table[lemma] = {}
# for (key, value) in zip(header, line[1:]):
# self.table[lemma][key] = value
# def compile_regex(self):
# self.split_regex_string = self.tokeniser_split_pattern
# interpolated_regex = self.interpolate_split_regex()
# self.split_regex = re.compile(interpolated_regex, re.UNICODE)
# def tokenise(self, data):
# chunks = re.split(self.split_regex, data)
# ## The empty string might be in chunks -- filter:
# chunks = [chunk for chunk in chunks if chunk != ""]
# return chunks
# def tokenise_to_string(self, data, delimiter="___"):
# assert delimiter not in data,"Problem in tokenise_to_string: delimiter '%s' occurs in input string '%s'"%(delimiter, data)
# tokens = self.tokenise(data)
# tokens = delimiter.join(tokens)
# return tokens
# def tokenise_textfiles(self, fnames_in, fname_out, strip_classes=[], token_unit="word"):
# """
# Take a list utf-8 textfile fnames_in, read 1 line at a time, and do the following:
# --tokenise
# --convert to safetext
# --strip tokens in strip_classes (if any)
# Write accumulated data to fname_out, whitespace-delimiting the tokens
# """
# #print "Tokenising text corpus..."
# f_out = open(fname_out, "w")
# i = 0
# for fname_in in fnames_in:
# f_in = codecs.open(fname_in, encoding='utf-8')
# for line in f_in:
# line = line.strip(" \n\r")
# if in_unicode_table(line):
# if token_unit == "word":
# line = self.tokenise(line)
# elif token_unit == "letter":
# line = list(line)
# else:
# print "token_unit '%s' not recognised"%(token_unit)
# line = [token for token in line if self.token_class(token) not in strip_classes]
# line = [self.safetext(token) for token in line]
# line = " ".join(line) + "\n"
# f_out.write(line)
# else:
# print "Tokenising text corpus: skip line -- can't find a character in unicode database"
# try:
# print line
# except UnicodeEncodeError:
# print "[line not printable]" ## Added because got error on a French corpus:
# ## UnicodeEncodeError: 'charmap' codec can't encode
# ## character u'\xef' in position 61: character maps to <undefined>
# i += 1
# if i % 10000 == 0:
# print ".",
# f_in.close()
# f_out.close()
# def train(self, text_list):
# '''
# Provided with a list of text files (a sample of the
# sort of text a TTS system might encounter), safetext and character_class
# will be precomputed for all character types appearing in the sample.
# This will probably improve efficiency, but more importantly it lets a
# user visually check the values to make sure they are sensible, and alter
# as necessary.
# '''
# assert len(text_list) > 0,"No transcriptions exist"
# character_counts = {}
# ## Count character types in text:
# #print "Learning character table..."
# i = 0
# for text_file in text_list:
# text = readlist(text_file, check_unicode_database=True)
# for line in text:
# i += 1
# if i % 10000==0:
# print ".",
# for character in line:
# if character not in character_counts:
# character_counts[character] = 0
# character_counts[character] += 1
# ## Precompute safetext and coarse categories, store in self.table:
# for (character, count) in character_counts.items():
# self.add_to_table(character, frequency=count)
# ## reload self to get regex patterns compiled:
# self.compile_regex()
# def save(self):
# if self.table != {}:
# ## rewrite tabs as strings:
# for (key,value) in self.table.items():
# if "\t" in key:
# key = key_replace("\t", "\\t")
# text_table = flatten_mapping(self.table, \
# sort_by=["character_class", "frequency"], reverse_sort=True)
# text_table = [key + "\t" + value for (key, value) in text_table]
# writelist(text_table, self.table_file, uni=True)
# def add_to_table(self, character, frequency=0):
# ## TODO: private and public character_class() etc --
# assert type(character) == unicode
# safetext = self.safetext(character)
# character_class = self.character_class(character)
# ## date-stamp entries so we know when stuff is added (hours?)
# added_on = str(datetime.date.today())
# self.table[character] = {"safetext": safetext,
# "character_class": character_class,
# "added_on": added_on,
# "frequency": frequency}
# def display_table(self):
# for (key, value) in self.table.items():
# print "%s %s"%(key, value)
# def alert_user(self):
# print """
# Lists of letters and punctuation written to
# %s and
# %s.
# Check and adjust
# manually before continuing. ASCII substitutions are given -- keep these
# HTK etc. compatible. Assume that all separators will be matched by
# regex \s -- i.e. don't list them explicitly.
# """%(letter_fname, punc_fname)
# def populate_unicode_category_map(self):
# '''
# Look at self.config['character_classes'] which contains coarse categories
# as keys and regex as values. From this an a list of all unicode categories,
# make self.unicode_category_map, which maps from all legal fine unicode
# categories to coarse category.
# '''
# # For listing of unicode categories, see e.g. http://www.fileformat.info/info/unicode/category/index.htm
# unicode_categories = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'LC', 'Ll', 'Lm', 'Lo', \
# 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', \
# 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', \
# 'So', 'Zl', 'Zp', 'Zs']
# # Map all unicode categories to a coarse class whose regular expression matches
# # it. Where there are multiple matches, take the last in list:
# for code in unicode_categories:
# for (coarse_class, regex) in self.character_classes.items():
# if re.match(regex, code):
# self.unicode_category_map[code] = coarse_class
# def character_class(self, unicode_string):
# '''
# Get the coarse class (e.g. punc) for a unicode character. Try lookup in self.table,
# if it is a new character, look up category in unicodedata and convert to coarse
# class.
# '''
# assert len(unicode_string) == 1,'''Method character_type can only be used on
# single characters'''
# if unicode_string in self.table:
# return self.table[unicode_string]['character_class']
# else:
# ## Look up unicode category in unicode data then translate to coarse class:
# try:
# cclass = unicodedata.category(unicode_string)
# except:
# print 'Could not get a category for %s from unicodedata'%(unicode_string)
# sys.exit(1)
# assert cclass in self.unicode_category_map,'Category %s not in map'%(cclass)
# return self.unicode_category_map[cclass]
# def token_class(self, unicode_string):
# '''
# Assign a class to a token (sequence of characters) based on the classes of its
# constituent characters. character_class_precedence determines what the token's
# class will be.
# TODO: explain and motivate precedence.
# '''
# ## Get classes of letters in the token:
# classes = [self.character_class(char) for char in unicode_string]
# for character_type in self.character_class_precedence:
# if character_type in classes:
# return character_type
# ### Was originally this:
# # # If we get this far, the loop was not broken by return -- error:
# # print 'No token class found for token %s'%(unicode_string)
# # sys.exit(1)
# ### ... but this is not robust enough against weird characters -- choose some
# ### arbitrary value instead:
# return self.character_class_precedence[0] ## return arbitrary value
# def safetext(self, unicode_string):
# safetext = ''
# for char in unicode_string:
# if char in self.table:
# safetext += self.table[char]['safetext']
# else:
# safetext += self.unicode_character_to_safetext(char)
# return safetext
# def unicode_character_to_safetext(self, char):
# '''
# Return value from self.table if it exists, otherwise work one out. The substitute
# should be safe to use with applications of interest (e.g. in HTK modelnames), and
# a perhaps over-cautious subset of ASCII is used for this (uppercase A-Z).
# TODO: [make this explanation complete]
# To enable
# reverse mapping, multicharacter safetexts are delimited with _.
# TODO: Simultaneously map to lowercase -- move this elsewhere? Optional?
# '''
# ## Replacements to make greedily within unicode name:
# name_reps = {" ": "",
# "-": "",
# "0": "ZERO",
# "1": "ONE",
# "2": "TWO",
# "3": "THREE",
# "4": "FOUR",
# "5": "FIVE",
# "6": "SIX",
# "7": "SEVEN",
# "8": "EIGHT",
# "9": "NINE" }
# if char.lower() in list("abcdefghijklmnopqrstuvwxyz"):
# substitute = char.lower()
# else:
# try:
# substitute = unicodedata.name(char.lower())
# except ValueError: ## got ValueError: no such name
# substitute = "PROBLEM CHARACTER"
# for key in name_reps.keys():
# substitute = substitute.replace(key, name_reps[key])
# substitute = "_" + substitute + "_"
# return substitute
# def character_class_regex_dict(self):
# """
# Get a dict mapping character_class names to regex strings to match one
# instance of that class.
# """
# outdict = {}
# for char_class in self.character_class_precedence:
# outdict[char_class] = self.character_class_regex(char_class)
# return outdict
# def character_class_regex(self, character_class):
# """
# Return a string that will be compiled into regex for matching 1 occurance of
# any memeber of character_class (as given in character_class of self.table).
# NOTE: this won't handle OOV characters --- they need to be added to table first.
# What would really be good here is proper unicode regular expressions (
# allowing matching on unicode properties: see e.g.
# http://www.regular-expressions.info/unicode.html)
# """
# pattern = u""
# assert self.table != {},"CharacterTable's table must have entries to compile a character_class_regex\nTry the train() method"
# for character in self.table.keys():
# if self.table[character]["character_class"] == character_class:
# pattern += character # .decode("utf-8")
# pattern = re.escape(pattern) ## Deal with literals of regex special characters
# pattern = "[" + pattern + "]"
# return pattern
# def interpolate_split_regex(self):
# class_dict = self.character_class_regex_dict()
# interpolated_regex = unicode(self.split_regex_string)
# for (name, wildcard) in class_dict.items():
# interpolated_regex = interpolated_regex.replace(unicode(name), wildcard)
# return interpolated_regex
| 39.279161 | 135 | 0.577744 |
7954a3d0a47521d0de8a0696092db68c68a3c7e5 | 1,672 | py | Python | server/sqlmap/thirdparty/beautifulsoup/__init__.py | kurpav/volcano | 31d5f8f6f5a282abbea3861368eb39cfe33bba77 | [
"MIT"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.sqlmap/thirdparty/beautifulsoup/__init__.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.sqlmap/thirdparty/beautifulsoup/__init__.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/env python
#
# Copyright (c) 2004-2010, Leonard Richardson
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the the Beautiful Soup Consortium and All
# Night Kosher Bakery nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
#
# Intentionally empty module body: beyond the license header above, this
# package __init__ defines nothing at import time.
pass
| 44 | 79 | 0.763756 |
7954a4ab4531cd599765dd52ecd9dd499a49e412 | 7,853 | py | Python | anemometere.py | SvenA098072/ESHL_CBo_Post_Processing | d3b53f41011b4e8fed61175b4873e514cc4802ed | [
"MIT"
] | null | null | null | anemometere.py | SvenA098072/ESHL_CBo_Post_Processing | d3b53f41011b4e8fed61175b4873e514cc4802ed | [
"MIT"
] | null | null | null | anemometere.py | SvenA098072/ESHL_CBo_Post_Processing | d3b53f41011b4e8fed61175b4873e514cc4802ed | [
"MIT"
] | 1 | 2021-06-24T14:48:13.000Z | 2021-06-24T14:48:13.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 15:52:37 2021
@author: Devineni
"""
import pandas as pd
import numpy as np
from statistics import mean
import time
import datetime as dt
import matplotlib.pyplot as plt
import operator # for plotting
from openpyxl import load_workbook
# import mysql.connector
import os
import pymysql
from sqlalchemy import create_engine
from easygui import *
import sys
#from recalibration import clean_sql_reg
def prRed(skk):
    """Print *skk* to stdout in bold red, then reset terminal attributes."""
    # BUGFIX: the original sequence '\033[31;1;m' had a stray ';' before 'm';
    # the resulting empty SGR parameter means 0 (reset) per ECMA-48, which
    # cancelled the red/bold on most terminals. '\033[31;1m' is red + bold.
    print("\033[31;1m {}\033[00m".format(skk))
import warnings
warnings.filterwarnings('ignore')
# Shared SQLAlchemy engine for all MySQL queries below.
# NOTE(review): credentials are hard-coded; consider an external config.
engine = create_engine("mysql+pymysql://root:Password123@localhost/",pool_pre_ping=True)
#%%
'''
This section deals with taking input selection of the experiment
easygui module was used to create the dialogue boxes for easy input
this is just a more visual way for experiment selection
'''
# Dialogue 1: which measurement campaign (location/season) to analyse.
msg ="Please select a Location/Season you like to analyze"
title = "Season selection"
choices = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
database = choicebox(msg, title, choices)
# The workbook has one sheet per campaign, listing that campaign's experiments.
times = pd.read_excel('C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/excel_files/Times_thesis.xlsx', sheet_name= database)
# Dialogue 2: which experiment (row of the selected sheet) to analyse.
msg ="Please select an experiment you would like to analyse in {database}".format(database = str(database))
title = "Experiment selection"
choices = list(times['short name'])
experiment = choicebox(msg, title, choices)
# Row index of the chosen experiment within the times sheet.
z = int(times[times['short name'] == experiment].index.values)
# Map each campaign name to the sheet holding its volume-flow (Vdot) data.
Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
# Start/end timestamps and short name of the selected experiment.
t0 = times.loc[z,"Start"]
tn = times.loc[z,"End"]
folder_name = times.loc[z,"short name"]
#%% Upload Anemometere data
# =============================================================================
# df = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/06.07.20 10_00.xlsx")
# df1 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/07.07.20 09_12.xlsx")
# df2 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/08.07.20 09_33.xlsx")
#
# df3 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/09.07.20 11_12.xlsx")
# df4 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/10.07.20 10_19.xlsx")
# df5 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/14.07.20 08_27.xlsx")
#
# df6 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/Kü_Testo/16.07.20 01_23.xlsx")
#
#
# dfs1 = [df,df1,df2,df3,df4,df5,df6]
# dfs = []
# for df in dfs1:
# df = df.iloc[:,[0,2,4]]
# df.columns = ["datetime", "hb_m/sec", "hb_°C"]
# df = df.set_index("datetime")
# dfs.append(df)
#
# kt_df = pd.concat(dfs)
# kt_df = kt_df.reset_index()
# kt_df["datetime"] = pd.to_datetime(kt_df["datetime"],format="%d-%m-%Y %H:%M:%S")
#
# kt_df.to_sql("eshl_summer_kt", con =create_engine("mysql+pymysql://root:Password123@localhost/anemometere",pool_pre_ping=True), if_exists="replace" )
#
#
# #%%
#
# df = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/06.07.20 09_59.xlsx")
# df1 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/07.07.20 09_12.xlsx")
# df2 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/08.07.20 09_33.xlsx")
#
# df3 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/09.07.20 11_12.xlsx")
# df4 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/10.07.20 10_19.xlsx")
# df5 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/14.07.20 08_27.xlsx")
#
# df6 = pd.read_excel("C:/Users/Devineni/OneDrive - bwedu/0_ESHL/0_ESHL_SUMMER/WZ_Testo/16.07.20 01_23.xlsx")
#
#
# dfs1 = [df,df1,df2,df3,df4,df5,df6]
# dfs = []
#
# for df in dfs1:
# df = df.iloc[:,[0,2,3]]
# df.columns = ["datetime", "hb_m/sec", "hb_°C"]
# df = df.set_index("datetime")
# dfs.append(df)
#
# wz_df = pd.concat(dfs)
# wz_df = wz_df.reset_index()
# wz_df["datetime"] = pd.to_datetime(wz_df["datetime"],format="%d-%m-%Y %H:%M:%S")
#
#
# wz_df.to_sql("eshl_summer_wz", con =create_engine("mysql+pymysql://root:Password123@localhost/anemometere",pool_pre_ping=True), if_exists="replace" )
#
# =============================================================================
#%%
#%%
df = pd.read_sql_query("SELECT * FROM anemometere.eshl_summer_kt WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
df1 = pd.read_sql_query("SELECT * FROM eshl_summer.1a_testo WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
df2 = pd.read_sql_query("SELECT * FROM eshl_summer.2a_testo WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
df3 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND \
'{}'".format("weather", "weather_all", t0,\
tn), con = engine).set_index("datetime")
fig, (ax1,ax2,ax3) = plt.subplots(3,1, sharex=True,figsize=(19,15), sharey=True)
df.plot(y="hb_m/sec", ax = ax1, label = "LR")
df3.plot(y='Wind Speed, m/s', ax=ax1, color = 'silver', label = "windspeed")
ax4 = ax1.twinx()
df.plot(y="hb_°C", ax = ax4, label = "temp",color = "green" )
df1.plot(y="hb_m/sec", ax = ax2, label = "BD 01")
df3.plot(y='Wind Speed, m/s', ax=ax2, color = 'silver', label = "windspeed")
ax5 = ax2.twinx()
df1.plot(y="hb_°C", ax = ax5, label = "temp",color = "green" )
df2.plot(y="hb_m/sec", ax = ax3, label = "BD 02")
df3.plot(y='Wind Speed, m/s', ax=ax3, color = 'silver', label = "windspeed")
ax6 = ax3.twinx()
df2.plot(y="hb_°C", ax = ax6, label = "temp",color = "green" )
ax1.set_title(folder_name + " Anemometere Data in m/sec")
plt.savefig(folder_name+".png", figsize=(19,15))
#%%
'''
This section deals with taking input selection of the experiment
easygui module was used to create the dialogue boxes for easy input
this is just a more visual way for experiment selection
'''
# NOTE(review): this block duplicates the selection code near the top of the
# file verbatim; it re-prompts the user (here: for the winter plots below)
# and overwrites msg/title/choices/database/times/z/t0/tn/folder_name.
# Consider extracting a select_experiment() helper.
msg ="Please select a Location/Season you like to analyze"
title = "Season selection"
choices = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
database = choicebox(msg, title, choices)
times = pd.read_excel('C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/excel_files/Times_thesis.xlsx', sheet_name= database)
msg ="Please select an experiment you would like to analyse in {database}".format(database = str(database))
title = "Experiment selection"
choices = list(times['short name'])
experiment = choicebox(msg, title, choices)
# Row index, Vdot sheet mapping and time window of the newly chosen experiment.
z = int(times[times['short name'] == experiment].index.values)
Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
t0 = times.loc[z,"Start"]
tn = times.loc[z,"End"]
folder_name = times.loc[z,"short name"]
#%%
# Winter-campaign anemometer data for the selected time window:
#   wdf1 -> bedroom 1 Testo anemometer ("BD 01")
#   wdf2 -> bedroom 2 Testo anemometer ("BD 02")
wdf1 = pd.read_sql_query("SELECT * FROM eshl_winter.1a_testo WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
wdf2 = pd.read_sql_query("SELECT * FROM eshl_winter.2a_testo WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
# Two stacked panels sharing the x (time) axis, one per sensor.
fig, (ax1,ax2) = plt.subplots(2,1, sharex=True,figsize=(19,15))
wdf1.plot(y="hb_m/sec", ax = ax1, label = "BD 01")
# Secondary y-axis carries the air temperature trace.
ax3 = ax1.twinx()
wdf1.plot(y="hb_°C", ax = ax3, color = "green")
# BUGFIX: the second panel plots wdf2 (bedroom 2) but was labelled "BD 01"
# by copy-paste; label it "BD 02", consistent with the summer plot above.
wdf2.plot(y="hb_m/sec", ax = ax2, label = "BD 02")
ax4 = ax2.twinx()
wdf2.plot(y="hb_°C", ax = ax4, color = "green")
ax1.set_title(folder_name + " Anemometere Data in m/sec")
# BUGFIX: dropped the invalid `figsize` kwarg -- savefig() does not accept it
# (the figure size was already set in plt.subplots above), and recent
# matplotlib versions raise on unknown keyword arguments.
plt.savefig(folder_name+".png")
| 39.462312 | 177 | 0.684579 |
7954a521368ca2c1d9df918c7daae73e77138f42 | 913 | py | Python | libraries/botbuilder-schema/botbuilder/schema/callerid_constants.py | victor-kironde/botbuilder-python | e893d9b036d7cf33cf9c9afd1405450c354cdbcd | [
"MIT"
] | 1 | 2021-03-02T22:56:09.000Z | 2021-03-02T22:56:09.000Z | libraries/botbuilder-schema/botbuilder/schema/callerid_constants.py | victor-kironde/botbuilder-python | e893d9b036d7cf33cf9c9afd1405450c354cdbcd | [
"MIT"
] | null | null | null | libraries/botbuilder-schema/botbuilder/schema/callerid_constants.py | victor-kironde/botbuilder-python | e893d9b036d7cf33cf9c9afd1405450c354cdbcd | [
"MIT"
] | 1 | 2020-10-01T07:34:07.000Z | 2020-10-01T07:34:07.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
class CallerIdConstants(str, Enum):
    """Well-known caller-ID values used on Bot Framework requests.

    Mixing in ``str`` lets members compare equal to their literal URN values.
    """

    # Caller ID for any Bot Framework channel (public Azure cloud).
    public_azure_channel = "urn:botframework:azure"

    # Caller ID for any Bot Framework US Government cloud channel.
    us_gov_channel = "urn:botframework:azureusgov"

    # Prefix used when one bot calls another; the Azure Active Directory App
    # ID of the initiating bot is appended after this prefix.
    bot_to_bot_prefix = "urn:botframework:aadappid:"
7954a5a1d75d7afb2a51ca7d528a8989ba95526e | 2,586 | py | Python | tools/dice_coeff_check.py | smg478/Kaggle---Carvana-Image-Masking-Challange | 3df42c451a4d2218230e462eba9387a3b742f7ef | [
"MIT"
] | 2 | 2017-11-23T00:22:28.000Z | 2018-02-20T02:57:32.000Z | tools/dice_coeff_check.py | smg478/Kaggle---Carvana-Image-Masking-Challange | 3df42c451a4d2218230e462eba9387a3b742f7ef | [
"MIT"
] | null | null | null | tools/dice_coeff_check.py | smg478/Kaggle---Carvana-Image-Masking-Challange | 3df42c451a4d2218230e462eba9387a3b742f7ef | [
"MIT"
] | 1 | 2020-12-18T16:06:53.000Z | 2020-12-18T16:06:53.000Z | import pandas as pd
import numpy as np
import cv2
from tqdm import tqdm
from u_net import get_unet1_1024x1024
def dice_coeff(y_true, y_pred):
    """Sørensen–Dice coefficient between two masks.

    NOTE(review): `K` (presumably the Keras backend) is never imported in this
    file, so calling this raises NameError — confirm the missing
    `from keras import backend as K` import.

    The +1 `smooth` term keeps the score defined when both masks are empty.
    """
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score
# ---- configuration --------------------------------------------------------
df_train = pd.read_csv('input/train_masks.csv')
ids_train = df_train['img'].map(lambda s: s.split('.')[0])

input_h = 640  # network input height  #640 #1280 #640
input_w = 960  # network input width   #960 #1920 #960
batch_size = 1
threshold = 0.5

model = get_unet1_1024x1024()
model.load_weights(filepath='weights/unet-1024_noblur_NoequalizeHist.16-0.99611.hdf5')

names = []
for id in ids_train:
    names.append('{}.jpg'.format(id))

# One image per "split" so a dice score can be reported per prediction.
train_splits = len(ids_train)  # Split test set (number of splits must be multiple of 2)
ids_train_splits = np.array_split(ids_train, indices_or_sections=train_splits)

split_count = 0

for ids_train_split in ids_train_splits:
    split_count += 1

    def train_generator():
        # Yields image batches for model.predict_generator; masks are loaded
        # alongside (scaled to [0, 1]) but only images are yielded.
        while True:
            for start in range(0, len(ids_train_split), batch_size):
                x_batch = []
                y_batch = []  # BUGFIX: was never initialized -> NameError on append
                end = min(start + batch_size, len(ids_train_split))
                ids_train_split_batch = ids_train_split[start:end]
                for id in ids_train_split_batch.values:
                    img = cv2.imread('input/test_hq/{}.jpg'.format(id))
                    img = cv2.resize(img, (input_w, input_h))
                    #img = cv2.bilateralFilter(img, 3, 40, 40)
                    mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                    mask = cv2.resize(mask, (input_w, input_h))
                    mask = np.expand_dims(mask, axis=2)
                    x_batch.append(img)
                    y_batch.append(mask)
                x_batch = np.array(x_batch, np.float32) / 255
                y_batch = np.array(y_batch, np.float32) / 255
                yield x_batch

    print("Predicting on {} samples (split {}/{})".format(len(ids_train_split), split_count, train_splits))
    preds = model.predict_generator(generator=train_generator(),
                                    steps=(len(ids_train_split) // batch_size) + 1, verbose=1)

    # NOTE(review): `id` here is still bound to the last element of the
    # `names` loop above, so every split is scored against the same mask —
    # confirm the intended ground-truth image.
    mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
    mask = cv2.resize(mask, (input_w, input_h))
    mask = np.expand_dims(mask, axis=2)
    # BUGFIX: use a distinct result name; the original
    # `dice_coeff = dice_coeff(...)` shadowed the function and made every
    # subsequent loop iteration fail with "float is not callable".
    score = dice_coeff(mask, preds)
    print('dice_coeff={} image name:{}'.format(score, ids_train_split))
7954a5ca842a9987402318e666c174641abd47b6 | 58,256 | py | Python | jax/interpreters/pxla.py | clemisch/jax | 771d7c8ba5dd0c64d72556712b5d4329878a20cc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/interpreters/pxla.py | clemisch/jax | 771d7c8ba5dd0c64d72556712b5d4329878a20cc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/interpreters/pxla.py | clemisch/jax | 771d7c8ba5dd0c64d72556712b5d4329878a20cc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of pmap and related functionality.
Note on ShardingSpecs and spec_to_indices():
A ShardingSpec describes at a high level how a logical array is sharded across
devices (each ShardedDeviceArray has a ShardingSpec, and ShardingSpecs also
describe how to shard inputs to a parallel computation). spec_to_indices()
encodes exactly how a given ShardingSpec is translated to device buffers,
i.e. how the sharded array is "laid out" across devices. Given a sequence of
devices, we shard the data across the devices in row-major order, with
replication treated as an extra inner dimension.
For example, given the logical data array [1, 2, 3, 4], if we were to partition
this array 4 ways with a replication factor of 2, for a total of 8 devices, the
data on each device would be: [1, 1], [2, 2], [3, 3], [4, 4].
This encoding is assumed by various parts of the system, e.g. generating
replica groups for collective operations.
"""
from collections import defaultdict
from contextlib import contextmanager
from itertools import product
import operator as op
import threading
from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Tuple,
Type, Union)
from absl import logging
import numpy as onp
from ..config import flags
from .. import core
from .. import linear_util as lu
from .. import lazy
from .. import source_info_util
from ..abstract_arrays import (ConcreteArray, ShapedArray, array_types,
raise_to_shaped)
from ..util import (partial, unzip2, unzip3, prod, safe_map, safe_zip,
extend_name_stack, wrap_name)
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from ..tree_util import tree_flatten, tree_map
from .batching import broadcast, not_mapped
from . import batching
from . import partial_eval as pe
from . import xla
from . import ad
# Shorthand for the XLA ops namespace.
xops = xc.ops

FLAGS = flags.FLAGS

_map = safe_map

# A numpy-style index for one shard of a logical array: an int, a slice, or a
# tuple of ints/slices (one entry per leading axis).
Index = Union[int, slice, Tuple[Union[int, slice], ...]]
# TODO(skye): make this a namedtuple. This may allow us to use ShardingSpecs in
# performance-sensitive code, e.g. shard_args.
class ShardingSpec:
  """How one logical array is split (and replicated) across devices.

  The spec is purely logical: it names no devices and fixes no shard order.
  `spec_to_indices` resolves a spec to the concrete, row-major per-buffer
  indices assumed throughout the system. Replication always copies the whole
  logical array; each `(factor, index)` pair says how many copies exist and
  before which logical axis the virtual replication axis sits.

  Attributes:
    shards_per_axis: per-axis shard counts; each axis must divide evenly.
    is_axis_materialized: per-axis flags saying whether the axis appears in
      the on-device shape; an unmaterialized axis must be split into size-1
      chunks (shards_per_axis[i] == array_shape[i]).
    replication_factors: list of (factor, index) pairs as described above.
  """

  def __init__(self,
               shards_per_axis: Tuple[int, ...],
               is_axis_materialized: Tuple[bool, ...],
               replication_factors: List[Tuple[int, int]]):
    # Both per-axis tuples must describe the same number of axes.
    assert len(shards_per_axis) == len(is_axis_materialized)
    self.shards_per_axis = shards_per_axis
    self.is_axis_materialized = is_axis_materialized
    self.replication_factors = replication_factors

  def __eq__(self, other):
    mine = (self.shards_per_axis, self.is_axis_materialized,
            self.replication_factors)
    theirs = (other.shards_per_axis, other.is_axis_materialized,
              other.replication_factors)
    return mine == theirs

  def __repr__(self):
    return ("ShardingSpec(shards_per_axis=%s, is_axis_materialized=%s, "
            "replication_factor=%s)" %
            (self.shards_per_axis, self.is_axis_materialized,
             self.replication_factors))
def spec_to_indices(shape: Tuple[int, ...],
                    sharding_spec: ShardingSpec) -> Tuple[Index, ...]:
  """Returns numpy-style indices corresponding to sharding_spec.

  Each index describes a shard of the array. The order of the indices is the
  same as the device_buffers of a ShardedDeviceArray (i.e. the data is laid out
  row-major, with replication treated as an extra innermost dimension).

  Args:
    shape: The shape of the logical array being sharded.
    sharding_spec: Describes how the array is sharded.

  Returns:
    A tuple of length `prod(sharding_spec.shards_per_axis) *
    prod(factor for factor, index in sharding_spec.replication_factors)`. Each
    element is an int, a slice object with step=1, or a tuple thereof, to be
    treated as an index into the full logical array.
  """
  assert len(shape) == len(sharding_spec.shards_per_axis)
  if not shape:
    # special case: scalars can only be indexed by `()`
    total_replication_factor = int(prod(factor for factor, index in
                                        sharding_spec.replication_factors))
    return ((),) * total_replication_factor

  # Walk the mesh axes (logical axes interleaved with replication axes) in
  # order, collecting the per-axis index lists. Replication factors are
  # consumed in ascending insertion-position order.
  replication_factors = sorted(sharding_spec.replication_factors,
                               key=op.itemgetter(1))
  logical_index = 0
  indices_per_mesh_axis = []
  for mesh_index in range(len(shape) + len(sharding_spec.replication_factors)):
    if replication_factors and replication_factors[0][1] == logical_index:
      # Insert a placeholder `None` to represent a replication factor. These
      # will all be removed later, since they don't correspond to logical axes.
      factor, _ = replication_factors.pop(0)
      indices_per_mesh_axis.append([None] * factor)
    else:
      indices = _axis_indices(
          shape[logical_index],
          sharding_spec.shards_per_axis[logical_index],
          sharding_spec.is_axis_materialized[logical_index])
      indices_per_mesh_axis.append(indices)
      logical_index += 1
  assert logical_index == len(shape) and not replication_factors

  # Cartesian product yields one composite index per device buffer, in
  # row-major order over the mesh axes.
  indices = list(product(*indices_per_mesh_axis))

  # remove placeholder `None`s and trailing colons, then unwrap
  # single-element tuples
  def canonicalize(index):
    index = [i for i in index if i is not None]
    while len(index) > 1 and index[-1] == slice(None):
      index.pop(-1)
    assert index
    if len(index) == 1:
      return index[0]
    return tuple(index)
  return tuple(canonicalize(index) for index in indices)
def _axis_indices(axis_size, num_shards, is_materialized):
if not is_materialized:
assert axis_size == num_shards
return list(range(axis_size))
if num_shards == 1:
return [slice(None)]
shard_size, ragged = divmod(axis_size, num_shards)
assert not ragged
return [slice(i * shard_size, (i + 1) * shard_size) for i in range(num_shards)]
### util
def identity(x):
  """Return the argument unchanged."""
  return x
# TODO(skye): expose PyLocalBuffers in xla_client
def shard_args(devices: Sequence[xb.xla_client.Device],
               indices: Sequence[Sequence[Index]],
               args) -> Sequence[Sequence[xb.xla_client._xla.PyLocalBuffer]]:
  """Shard each argument data array along its leading axis.

  Args:
    devices: sequence of Devices mapping replica index to a physical device.
    indices: sequence of the same length as `args` describing how each arg
      should be sharded/replicated across `devices`. Each element in `indices`
      is the same length as `devices`.
    args: a sequence of JaxTypes representing arguments to be sharded according
      to `indices` and placed on `devices`.

  Returns:
    A list of device buffers with the same length as `devices` indexed by
    replica number, so that the nth element is the argument to be passed to the
    nth replica.
  """
  nargs, nrep = len(args), len(devices)
  # buffers[r][a] is the device buffer for argument `a` on replica `r`.
  buffers = [[None] * nargs for _ in range(nrep)]
  for a, arg in enumerate(args):
    # The shard_arg_handlers allow an extensible set of types to be sharded, but
    # inline handling for ShardedDeviceArray as a special case for performance
    # NOTE: we compare indices instead of sharding_spec because
    # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.
    if type(arg) is ShardedDeviceArray and indices[a] == arg.indices:
      for r, buf in enumerate(arg.device_buffers):
        # Reuse the existing buffer when it already lives on the right device;
        # otherwise copy it over.
        buffers[r][a] = (buf if buf.device() == devices[r]
                         else buf.copy_to_device(devices[r]))
    else:
      arg = xla.canonicalize_dtype(arg)
      bufs = shard_arg_handlers[type(arg)](arg, devices, indices[a])
      for r, buf in enumerate(bufs):
        buffers[r][a] = buf

  return buffers
# Handlers, keyed by argument type, that shard one argument across `devices`
# according to per-device `indices`.
shard_arg_handlers: Dict[Any, Callable[[Any, Any, Any], Sequence[Any]]] = {}
shard_arg_handlers[core.Unit] = \
    lambda x, devices, _: [xla.device_put(core.unit, d) for d in devices]
def _shard_array(x, devices, indices):
  # Device-put the `indices[i]` slice of `x` onto `devices[i]`.
  return [xla.device_put(x[i], d) for (i, d) in zip(indices, devices)]
for _t in array_types:
  shard_arg_handlers[_t] = _shard_array
def _shard_device_array(x, devices, indices):
  # Compute all shard bounds up front so the slicing can be done in a single
  # fused _multi_slice call on the device array.
  start_indices, limit_indices, removed_dims = map(tuple, unzip3(
      _as_slice_indices(x, idx) for idx in indices))
  shards = x._multi_slice(start_indices, limit_indices, removed_dims)
  return [xla.device_put(s, d) for s, d in zip(shards, devices)]
shard_arg_handlers[xla.DeviceArray] = _shard_device_array
# NOTE(skye): we could refactor to generate _multi_slice parameters directly
# from the input ShardingSpec, rather than the indices. However, this would
# require duplicating the ordering logic of spec_to_indices, which is more
# subtle and more likely to change than the index logic we have to support here.
def _as_slice_indices(arr: xla.DeviceArray, idx: Index) -> Tuple[
    Tuple[int, ...], Tuple[int, ...], Tuple[int, ...]]:
  """Decompose a shard index into (start_indices, limit_indices, removed_dims).

  Integer entries become length-1 slices whose axis is marked for removal;
  full-colon entries leave the axis whole; explicit slices contribute their
  bounds. Axes beyond len(idx) are left untouched.
  """
  starts = [0] * arr.ndim
  limits = list(arr.shape)
  squeezed_axes = []

  entries = idx if isinstance(idx, tuple) else (idx,)
  for axis, entry in enumerate(entries):
    if isinstance(entry, int):
      # A bare int selects one element and drops the axis.
      starts[axis] = entry
      limits[axis] = entry + 1
      squeezed_axes.append(axis)
    elif entry == slice(None):
      continue  # whole axis — nothing to narrow
    else:
      assert isinstance(entry, slice)
      assert isinstance(entry.start, int)
      assert isinstance(entry.stop, int)
      starts[axis] = entry.start
      limits[axis] = entry.stop

  return tuple(starts), tuple(limits), tuple(squeezed_axes) # type: ignore
def shard_aval(size, aval):
  """Shard an abstract value along its leading axis into `size` pieces.

  Raises:
    TypeError: if no handler is registered for type(aval).
  """
  try:
    return shard_aval_handlers[type(aval)](size, aval)
  except KeyError as err:
    raise TypeError("No shard_aval handler for type: {}".format(type(aval))
                    ) from err
# Handlers, keyed by abstract-value type, computing the per-shard aval.
shard_aval_handlers: Dict[Type[core.AbstractValue], Callable[[int, Any], Any]] = {}
shard_aval_handlers[core.AbstractUnit] = lambda size, x: x
def _shard_abstract_array(size, x):
  """Drop the leading axis of a ShapedArray, checking it matches `size`."""
  shape = x.shape
  if not shape:
    raise ValueError(f"Scalar cannot be split across {size} shards.")
  if shape[0] != size:
    raise ValueError(f"Axis size {size} does not match leading dimension of "
                     f"shape {shape}")
  return ShapedArray(shape[1:], x.dtype)
shard_aval_handlers[ShapedArray] = _shard_abstract_array
# TODO(skye): expose PyLocalBuffers in xla_client
def aval_to_result_handler(sharding_spec: Optional[ShardingSpec],
                           indices: Optional[Tuple[Index]],
                           aval: core.AbstractValue) -> Callable[
                               [List[xb.xla_client._xla.PyLocalBuffer]], Any]:
  """Returns a function for handling the raw buffers of a single output aval.

  Args:
    sharding_spec: indicates how the output is sharded across devices, or None
      for non-array avals.
    indices: the pre-computed result of spec_to_indices, or None for non-array
      avals.
    aval: the output AbstractValue.

  Returns:
    A function for handling the PyLocalBuffers that will eventually be produced
    for this output. The function will return an object suitable for returning
    to the user, e.g. a ShardedDeviceArray.
  """
  try:
    return pxla_result_handlers[type(aval)](sharding_spec, indices, aval)
  except KeyError as err:
    raise TypeError("No pxla_result_handler for type: {}".format(type(aval))
                    ) from err
# Type of a factory that, given sharding info and an aval, returns a function
# turning raw per-device buffers into a user-facing value.
PxlaResultHandler = Callable[..., Callable[
    [List[xb.xla_client._xla.PyLocalBuffer]], Any]]
pxla_result_handlers: Dict[Type[core.AbstractValue], PxlaResultHandler] = {}
pxla_result_handlers[core.AbstractUnit] = lambda *_: lambda _: core.unit
def array_result_handler(sharding_spec, indices, aval: ShapedArray):
  # Arrays come back as one buffer per device; wrap them as a single logical
  # ShardedDeviceArray without gathering to host.
  return lambda bufs: ShardedDeviceArray(aval, sharding_spec, bufs, indices)
pxla_result_handlers[ShapedArray] = array_result_handler
pxla_result_handlers[ConcreteArray] = array_result_handler
### applying parallel primitives in op-by-op Python dispatch
# There are at least two cases where we might want to evaluate a parallel
# primitive dispatched from Python, rather than being staged out:
# 1. axis_size = psum(1, 'axis_name'),
# 2. to enable an implicit outermost pmap-like context for multi-host
# multi-controller SPMD programs.
# In each case, we can't rely on any data dependence on a pmap trace; instead we
# need some dynamic context, basically modeling the axis name environment stack.
# To handle the former case, we don't need to communicate at all; we instead
# have a table of parallel_pure_rules. To handle the latter case, we'll have a
# globally-scoped root environment frame and compile and execute a single-op
# XLA collective.
class DynamicAxisEnvFrame(object):
  """One frame of the dynamic axis-name environment.

  `hard_size` is the mapped axis size; the soft trace/size slots start unset
  and are filled in later by the split-axis machinery.
  """
  __slots__ = ["name", "pmap_trace", "hard_size", "soft_trace", "soft_size"]

  def __init__(self, name, pmap_trace, hard_size):
    self.name = name
    self.pmap_trace = pmap_trace
    self.hard_size = hard_size
    # Not known at construction time.
    self.soft_trace = None
    self.soft_size = None
class DynamicAxisEnv(list):
  """A stack of DynamicAxisEnvFrame objects addressable by axis name.

  Membership and lookup are by frame name; lookup scans from the innermost
  (most recently pushed) frame outward, so the innermost binding wins.
  """

  def __contains__(self, axis_name):
    return any(frame.name == axis_name for frame in self)

  def __getitem__(self, axis_name):
    for frame in reversed(self):
      if frame.name == axis_name:
        return frame
    raise NameError(f"unbound axis name: {axis_name}")

  @property
  def sizes(self):
    # Hard axis sizes, outermost first.
    return tuple(frame.hard_size for frame in self)

  @property
  def nreps(self):
    # Total replica count implied by all active mapped axes.
    return prod(frame.hard_size for frame in self)
class _ThreadLocalState(threading.local):
  # Each thread gets its own dynamic axis-name environment, so concurrently
  # running traces on different threads don't see each other's frames.
  def __init__(self):
    self.dynamic_axis_env = DynamicAxisEnv()

_thread_local_state = _ThreadLocalState()
@contextmanager
def extend_dynamic_axis_env(axis_name, pmap_trace, hard_size):
  """Push a frame for `axis_name` onto this thread's dynamic axis env."""
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  dynamic_axis_env.append(DynamicAxisEnvFrame(axis_name, pmap_trace, hard_size))
  try:
    yield
  finally:
    # Always pop, even if the wrapped computation raises.
    dynamic_axis_env.pop()
def unmapped_device_count(backend=None):
  """Number of devices left after dividing out all active mapped axis sizes."""
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  mapped = prod(frame.hard_size for frame in dynamic_axis_env)
  unmapped, ragged = divmod(xb.device_count(backend), mapped)
  # The device count must be an exact, positive multiple of the mapped sizes.
  assert not ragged and unmapped > 0
  return unmapped
def apply_parallel_primitive(prim, *args, **params):
  # This is the op-by-op version of applying a collective primitive, like a psum
  # that doesn't have a data dependence on the argument of a pmap function. In
  # particular, this code gets hit when we write `axis_size = psum(1, 'i')`. We
  # look up information in the dynamic axis env.
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  axis_name = params.pop('axis_name')
  axis_index_groups = params.pop('axis_index_groups')
  if axis_index_groups is not None:
    shape = (len(axis_index_groups[0]),)
  else:
    # A frame's logical size is its hard size times any soft (vectorized) size.
    logical_size = lambda frame: frame.hard_size * (frame.soft_size or 1)
    if isinstance(axis_name, (list, tuple)):
      shape = tuple(logical_size(dynamic_axis_env[name]) for name in axis_name)
    else:
      shape = (logical_size(dynamic_axis_env[axis_name]),)
  return parallel_pure_rules[prim](*args, shape=shape, **params)

# Communication-free rules for collective primitives, used by
# apply_parallel_primitive above; populated elsewhere.
parallel_pure_rules: Dict[core.Primitive, Callable] = {}
def axis_index(axis_name):
  """Return the index along the pmapped axis ``axis_name``.

  Args:
    axis_name: hashable Python object used to name the pmapped axis (see the
      :func:`jax.pmap` documentation for more details).

  Returns:
    An integer representing the index.

  For example, with 8 XLA devices available:

  >>> from functools import partial
  >>> @partial(pmap, axis_name='i')
  ... def f(_):
  ...   return lax.axis_index('i')
  ...
  >>> f(np.zeros(4))
  ShardedDeviceArray([0, 1, 2, 3], dtype=int32)
  >>> f(np.zeros(8))
  ShardedDeviceArray([0, 1, 2, 3, 4, 5, 6, 7], dtype=int32)
  >>> @partial(pmap, axis_name='i')
  ... @partial(pmap, axis_name='j')
  ... def f(_):
  ...   return lax.axis_index('i'), lax.axis_index('j')
  ...
  >>> x, y = f(np.zeros((4, 2)))
  >>> print(x)
  [[0 0]
   [1 1]
   [2 2]
   [3 3]]
  >>> print(y)
  [[0 1]
   [0 1]
   [0 1]
   [0 1]]
  """
  # Dispatch through the primitive so tracing machinery can intercept it.
  return axis_index_p.bind(axis_name=axis_name)
def _axis_index_bind(*, axis_name):
  """Custom bind for axis_index_p: stage an eqn directly into the pmap trace
  found in the dynamic axis env (there is no data argument to trace through).
  """
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  frame = dynamic_axis_env[axis_name]
  # Sizes of all mapped axes up to and including the named one.
  sizes = dynamic_axis_env.sizes[:dynamic_axis_env.index(frame)+1]
  nreps = dynamic_axis_env.nreps
  trace = frame.pmap_trace

  out_aval = ShapedArray((), onp.int32)
  out_tracer = pe.JaxprTracer(trace, pe.PartialVal.unknown(out_aval), None)
  eqn = pe.new_eqn_recipe([], [out_tracer], axis_index_p,
                          dict(nreps=nreps, sizes=sizes,
                               soft_size=frame.soft_size, axis_name=axis_name),
                          source_info_util.current())
  out_tracer.recipe = eqn

  if not frame.soft_trace:
    return out_tracer
  else:
    # Under a soft (vectorized) axis, expand the hard index into per-element
    # indices: hard_index * soft_size + [0, soft_size).
    val_out = out_tracer * frame.soft_size + onp.arange(frame.soft_size)
    return SplitAxisTracer(frame.soft_trace, axis_name, val_out)
def _axis_index_translation_rule(c, nreps, sizes, soft_size, axis_name):
  # replica_id // (nreps // prod(sizes)) % sizes[-1] recovers this replica's
  # index along the innermost named axis.
  div = xb.constant(c, onp.array(nreps // prod(sizes), dtype=onp.uint32))
  mod = xb.constant(c, onp.array(sizes[-1], dtype=onp.uint32))
  unsigned_index = xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)
  return xops.ConvertElementType(unsigned_index, xb.dtype_to_etype(onp.int32))

axis_index_p = core.Primitive('axis_index')
axis_index_p.def_custom_bind(_axis_index_bind)
axis_index_p.def_abstract_eval(
    lambda *args, **params: ShapedArray((), onp.int32))
xla.translations[axis_index_p] = _axis_index_translation_rule
### lazy device-memory persistence and result handling
class ShardedDeviceArray(xla.DeviceArray):
  """A ShardedDeviceArray is an ndarray sharded across devices.

  The purpose of a ShardedDeviceArray is to reduce the number of transfers when
  executing replicated computations, by allowing results to persist on the
  devices that produced them. That way dispatching a similarly replicated
  computation that consumes the same sharded memory layout does not incur any
  transfers.

  A ShardedDeviceArray represents one logical ndarray value, and simulates the
  behavior of an ndarray so that it can be treated by user code as an ndarray;
  that is, it is only an optimization to reduce transfers.

  Attributes:
    aval: A ShapedArray indicating the shape and dtype of this array.
    sharding_spec: describes how this array is sharded across `device_buffers`.
    device_buffers: the buffers containing the data for this array. Each buffer
      is the same shape and on a different device. Buffers are in row-major
      order, with replication treated as an extra innermost dimension.
    indices: the result of spec_to_indices(sharding_spec). Can optionally be
      precomputed for efficiency. A list the same length as
      `device_buffers`. Each index indicates what portion of the full array is
      stored in the corresponding device buffer, i.e. `array[indices[i]] ==
      device_buffers[i].to_py()`.
  """
  __slots__ = ["device_buffers", "sharding_spec", "indices",
               "_one_replica_buffer_indices"]

  # TODO(skye): expose PyLocalBuffers in xla_client
  def __init__(self,
               aval: ShapedArray,
               sharding_spec,  # TODO(skye): add type annotation back, see below
               device_buffers: List[xb.xla_client._xla.PyLocalBuffer] = None,
               indices: Optional[Tuple[Index, ...]] = None):
    # TODO(skye): this is temporary staging while we switch users over to
    # providing sharding_spec. It assumes that any pre-existing callers are
    # creating pmap-style ShardedDeviceArrays.
    if device_buffers is None:
      device_buffers = sharding_spec
      sharded_aval = ShapedArray(aval.shape[1:], aval.dtype)
      sharding_spec = _pmap_sharding_spec(aval.shape[0], aval.shape[0],
                                          1, None, sharded_aval, True)

    # TODO(skye): assert invariants. Keep performance in mind though.
    if indices is None:
      indices = spec_to_indices(aval.shape, sharding_spec)
    self.aval = aval
    self.device_buffers = device_buffers
    self.sharding_spec = sharding_spec
    self.indices = indices
    # _npy_value caches the gathered host-side ndarray; None until first use.
    self._npy_value = None
    self._one_replica_buffer_indices = None
    if not core.skip_checks:
      assert type(aval) is ShapedArray

  @property
  def one_replica_buffer_indices(self):
    """Indices of buffers containing one complete copy of the array data."""
    if self._one_replica_buffer_indices is None:
      # Keep the first buffer seen for each distinct shard index.
      one_replica_indices = []
      seen_index_hashes = set()
      for i, index in enumerate(self.indices):
        hashed_index = _hashable_index(index)
        if hashed_index not in seen_index_hashes:
          one_replica_indices.append(i)
          seen_index_hashes.add(hashed_index)
      self._one_replica_buffer_indices = one_replica_indices
    return self._one_replica_buffer_indices

  def copy_to_host_async(self):
    # Only one replica of each distinct shard needs to be fetched.
    for buffer_index in self.one_replica_buffer_indices:
      self.device_buffers[buffer_index].copy_to_host_async()

  def delete(self):
    # Frees device memory eagerly; the array is unusable afterwards.
    for buf in self.device_buffers:
      buf.delete()
    self.device_buffers = None
    self._npy_value = None

  def _check_if_deleted(self):
    if self.device_buffers is None:
      raise ValueError("ShardedDeviceArray has been deleted.")

  def block_until_ready(self):
    self._check_if_deleted()
    for buf in self.device_buffers:
      buf.block_host_until_ready()
    return self

  @property
  def _value(self):
    # Gather all shards to host memory, caching the assembled ndarray.
    if self._npy_value is None:
      self.copy_to_host_async()
      npy_value = onp.empty(self.aval.shape, self.aval.dtype)
      for i in self.one_replica_buffer_indices:
        npy_value[self.indices[i]] = self.device_buffers[i].to_py()
      self._npy_value = npy_value
    return self._npy_value

  def __getitem__(self, idx):
    # Fast path: if `idx` names exactly one shard, hand back that buffer as a
    # DeviceArray without gathering the whole array to host.
    if self._npy_value is None and idx in self.indices:
      buf = self.device_buffers[self.indices.index(idx)]
      aval = ShapedArray(buf.shape().dimensions(), self.aval.dtype)
      return xla.DeviceArray(aval, None, lazy.array(aval.shape), buf)
    else:
      return super(ShardedDeviceArray, self).__getitem__(idx)
def _hashable_index(idx):
return tree_map(lambda x: (x.start, x.stop) if type(x) == slice else x,
idx)
# The fast path is handled directly in shard_args().
# TODO(skye): is there a simpler way to rewrite this using sharding_spec?
def _shard_sharded_device_array_slow_path(x, devices, indices):
  """Reshard a ShardedDeviceArray whose layout doesn't match `indices`,
  reusing existing device buffers where possible."""
  # Group x's buffers by the (hashable) shard index they hold.
  candidates = defaultdict(list)
  for buf, idx in zip(x.device_buffers, x.indices):
    candidates[_hashable_index(idx)].append(buf)

  bufs = []
  for idx, device in safe_zip(indices, devices):
    # Look up all buffers that contain the correct slice of the logical array.
    candidates_list = candidates[_hashable_index(idx)]
    if not candidates_list:
      # This array isn't sharded correctly. Reshard it via host roundtrip.
      # TODO(skye): more efficient reshard?
      return shard_arg_handlers[type(x._value)](x._value, devices, indices)

    # Try to find a candidate buffer already on the correct device,
    # otherwise copy one of them.
    for buf in candidates_list:
      if buf.device() == device:
        bufs.append(buf)
        break
    else:
      bufs.append(buf.copy_to_device(device))
  return bufs
shard_arg_handlers[ShardedDeviceArray] = _shard_sharded_device_array_slow_path
def _sharded_device_array_constant_handler(c, val, canonicalize_types=True):
  # Embedding a ShardedDeviceArray as an XLA constant gathers all shards to
  # host first (onp.asarray triggers the full-array transfer).
  return xb.constant(c, onp.asarray(val), canonicalize_types=canonicalize_types)
xb.register_constant_handler(ShardedDeviceArray, _sharded_device_array_constant_handler)

# Register ShardedDeviceArray with the various dispatch tables so it is
# accepted anywhere a DeviceArray is.
core.pytype_aval_mappings[ShardedDeviceArray] = ConcreteArray
xla.device_put_handlers[ShardedDeviceArray] = xla._device_put_array
xla.pytype_aval_mappings[ShardedDeviceArray] = op.attrgetter('aval')
xla.canonicalize_dtype_handlers[ShardedDeviceArray] = identity
### the xla_pmap primitive and its rules are comparable to xla_call in xla.py
def xla_pmap_impl(fun: lu.WrappedFun, *args, backend, axis_name, axis_size, global_axis_size,
                  devices, name, mapped_invars, donated_invars):
  """Impl rule for xla_pmap: compile via (cached) parallel_callable, execute."""
  abstract_args = map(xla.abstractify, args)
  compiled_fun = parallel_callable(fun, backend, axis_name, axis_size,
                                   global_axis_size, devices, name, mapped_invars,
                                   donated_invars, *abstract_args)
  return compiled_fun(*args)
@lu.cache
def parallel_callable(fun, backend, axis_name, axis_size, global_axis_size,
                      devices, name, mapped_invars, donated_invars, *avals):
  """Traces, lowers, and compiles `fun` for pmap execution.

  Returns a callable taking the concrete (unsharded) arguments and executing
  the compiled computation across local devices. Cached via `lu.cache` on the
  wrapped function plus the hashable arguments.
  """
  if devices is not None and len(devices) == 0:
    raise ValueError("'devices' argument to pmap must be non-empty, or None.")
  inner_pmap = len(_thread_local_state.dynamic_axis_env) > 0
  # Determine global_axis_size for use in AxisEnv.
  if xb.host_count() > 1 and global_axis_size is None and inner_pmap:
    raise ValueError("'axis_size' must be specified for nested multi-host pmaps")
  if (xb.host_count() == 1 and global_axis_size is not None and
      global_axis_size != axis_size):
    raise ValueError(
        f"Specified axis_size {global_axis_size} doesn't match received "
        f"axis_size {axis_size}.")
  must_run_on_all_devices = False
  no_nested_sharding = False
  if global_axis_size is None:
    if xb.host_count() == 1:
      global_axis_size = axis_size
    elif devices:
      # This allows each host in a multi-host pmap to run on a different number
      # of devices, but precludes nested sharding (i.e. inner pmaps or
      # sharded_jits).
      global_axis_size = len(devices)
      no_nested_sharding = True
    else:
      # This assumes all hosts run on the same number of devices. We make sure
      # this assumption is true by requiring that the pmap is run on all devices
      # (and making the further assumption that each host has the same number of
      # devices). Nested sharding is ok in this case.
      global_axis_size = axis_size * xb.host_count()
      assert all(len(xb.local_devices(host_id)) == xb.local_device_count()
                 for host_id in xb.host_ids())
      must_run_on_all_devices = True
  # Devices owned by this host, if an explicit device list was given.
  if devices:
    local_devices = [d for d in devices if d.host_id == xb.host_id()]
    assert len(local_devices) > 0
  else:
    local_devices = None
  # dynamic_fun threads the axis env into the trace via a dummy first argument.
  @lu.wrap_init
  def dynamic_fun(dummy, *args):
    with extend_dynamic_axis_env(axis_name, dummy._trace, global_axis_size):
      return fun.call_wrapped(*args)
  sharded_avals = tuple(shard_aval(axis_size, aval) if m else aval
                        for m, aval in zip(mapped_invars, avals))
  pvals = [pe.PartialVal.unknown(aval) for aval in sharded_avals]
  # We add a dummy first invar, to carry the trace details to `dynamic_fun`
  pval = pe.PartialVal.unknown(core.abstract_unit) # dummy value for axis env
  jaxpr, out_pvals, consts = pe.trace_to_jaxpr(
      dynamic_fun, [pval] + pvals, instantiate=False, stage_out=True, bottom=True)
  jaxpr.invars = jaxpr.invars[1:] # ignore dummy
  jaxpr, uses_outfeed = xla.apply_outfeed_rewriter(jaxpr)
  out_pvs, out_consts = unzip2(out_pvals)
  # TODO(skye,mattjj): allow more collectives on multi-host as we test them, but
  # for now raise an error
  if devices is not None:
    is_multi_host_pmap = any(d.host_id != xb.host_id() for d in devices)
  else:
    is_multi_host_pmap = xb.host_count() > 1
  if is_multi_host_pmap:
    used_collectives = set(xla.jaxpr_collectives(jaxpr))
    if not used_collectives.issubset(multi_host_supported_collectives):
      msg = "using collectives that aren't supported for multi-host: {}"
      raise TypeError(msg.format(", ".join(map(str, used_collectives))))
  if all(pv is None for pv in out_pvs):
    # When the output doesn't depend on the input we don't need to compile an
    # XLA computation at all; we handle this as a special case so we can stage
    # out multi-replica XLA computations regardless of the hardware available.
    # The 'None' values here are just dummies we know will be ignored.
    handlers = [
        _pval_to_result_handler(axis_size, None, None, None, pval, local_devices,
                                backend) for pval in out_pvals
    ]
    results = [handler(None) for handler in handlers]
    return lambda *_: results
  # TODO: replace this with a chain of pmaps and/or sharded_jits
  jaxpr_replicas = xla.jaxpr_replicas(jaxpr)
  num_local_replicas = axis_size * jaxpr_replicas
  num_global_replicas = global_axis_size * jaxpr_replicas
  arg_parts, out_parts, num_partitions = _find_partitions(jaxpr)
  # A "shard" is one replica of one partition; shards map 1:1 onto devices.
  num_local_shards = num_local_replicas * num_partitions
  num_global_shards = num_global_replicas * num_partitions
  # This error checking logic is all screwed up for nested pmaps, luckily we
  # won't have to handle this case with omnistaging.
  if (not inner_pmap and
      must_run_on_all_devices and num_local_shards != xb.local_device_count()):
    if num_local_shards == axis_size:
      raise ValueError(
         f"On multi-host platforms, the input to pmapped functions must have "
         f"leading axis size equal to the number of local devices if no "
         f"`devices` argument is specified. Got axis_size={axis_size}, "
         f"num_local_devices={xb.local_device_count()}")
    else:
      raise ValueError(
        f"On multi-host platforms, pmapped functions must run across all "
        f"devices, i.e. num_replicas * num_partitions should equal the "
        f"number of local devices. Got num_replicas={num_local_replicas}, "
        f"num_partitions={num_partitions}, and "
        f"num_local_devices={xb.local_device_count()}")
  if (not inner_pmap and
      no_nested_sharding and (jaxpr_replicas > 1 or num_partitions > 1)):
    raise ValueError(
      f"On multi-host platforms, pmapped functions that both have `devices` "
      f"specified and contain an inner_pmap or sharded_jit must specify an "
      f"`axis_size` (or remove the `devices` argument). Got nested_replicas="
      f"{jaxpr_replicas} and nested_partitions={num_partitions}")
  log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
  logging.log(log_priority,
              f"Compiling {fun.__name__} for {num_global_shards} devices with "
              f"args {avals}. (num_replicas={num_global_replicas} "
              f"num_partitions={num_partitions}")
  # Build the XLA computation for the jaxpr.
  axis_env = xla.AxisEnv(num_global_replicas, (axis_name,), (global_axis_size,), devices)
  tuple_args = len(sharded_avals) > 100 # pass long arg lists as tuple for TPU
  c = xb.make_computation_builder("pmap_{}".format(fun.__name__))
  xla_consts = _map(partial(xb.constant, c), consts)
  replicated = [not m for m in mapped_invars]
  xla_args = xla._xla_callable_args(c, sharded_avals, tuple_args, replicated,
                                    arg_parts)
  out_nodes = xla.jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts,
                                extend_name_stack(wrap_name(name, 'pmap')), *xla_args)
  build_out_tuple = partial(xops.Tuple, c, out_nodes)
  if out_parts is not None:
    out_tuple = xb.with_sharding(c, out_parts, build_out_tuple)
  else:
    out_tuple = build_out_tuple()
  backend = xb.get_backend(backend)
  if backend.platform == "tpu":
    donated_invars = xla.set_up_aliases(c, xla_args, out_tuple, donated_invars, tuple_args)
  built = c.Build(out_tuple)
  # Compute the device assignment for the computation.
  if devices is None:
    if num_global_shards > xb.device_count(backend):
      msg = ("compiling computation that requires {} logical devices, but only {} XLA "
             "devices are available (num_replicas={}, num_partitions={})")
      raise ValueError(msg.format(num_global_shards, xb.device_count(backend),
                                  num_global_replicas, num_partitions))
    # On a single host, we use the platform's default device assignment to
    # potentially take advantage of device locality. On multiple hosts, the
    # default device assignment may interleave different hosts' replicas,
    # violating pmap's semantics where data is sharded across replicas in
    # row-major order. Instead, manually create a device assignment that ensures
    # each host is responsible for a continguous set of replicas.
    if num_global_replicas > num_local_replicas:
      # TODO(skye): use a locality-aware assignment that satisfies the above
      # constraint.
      devices = [d for host_id in xb.host_ids()
                 for d in xb.local_devices(host_id)]
    else:
      devices = xb.get_backend(backend).get_default_device_assignment(
          num_global_replicas, num_partitions)
  else:
    if num_local_shards != len(local_devices):
      local_devices_str = ", ".join(map(str, local_devices))
      raise ValueError(
          "Leading axis size of input to pmapped function must equal the "
          "number of local devices passed to pmap. Got axis_size=%d, "
          "num_local_devices=%d.\n(Local devices passed to pmap: %s)"
          % (axis_size, len(local_devices), local_devices_str))
    if num_global_shards != len(devices):
      raise ValueError("compiling computation that creates %s shards, "
                       "but %s devices were specified" %
                       (num_global_shards, len(devices)))
  # 'devices' may be 1D or 2D at this point (e.g.
  # get_default_device_assignment() returns 2D assignment, caller may have
  # provided 1D list of devices).
  device_assignment = tree_map(lambda d: d.id, devices)
  # Convert to 2D in case it's 1D and we have > 1 partitions.
  device_assignment = onp.array(device_assignment).reshape(
      (num_global_replicas, num_partitions))
  compile_options = xb.get_compile_options(
      num_replicas=num_global_replicas,
      num_partitions=num_partitions,
      device_assignment=device_assignment)
  compile_options.parameter_is_tupled_arguments = tuple_args
  compiled = backend.compile(built, compile_options=compile_options)
  # Build the input sharders and output handlers for the executable.
  input_sharding_specs = [
      _pmap_sharding_spec(
          num_local_replicas, axis_size, num_partitions, parts, aval, mapped)
      for (aval, parts, mapped)
      in safe_zip(sharded_avals, arg_parts or [None] * len(avals),
                  mapped_invars)]
  input_indices = [spec_to_indices(aval.shape, spec)
                   if spec is not None else None
                   for aval, spec in zip(avals, input_sharding_specs)]
  handle_args = partial(shard_args, compiled.local_devices(), input_indices)
  handle_outs = _pvals_to_results_handler(axis_size, num_local_replicas,
                                          num_partitions, out_parts,
                                          out_pvals, compiled.local_devices(),
                                          backend)
  return partial(execute_replicated, compiled, uses_outfeed, backend, handle_args,
                 handle_outs)
# Collectives safe to use in a multi-host pmap: parallel_callable raises a
# TypeError for any collective in the jaxpr not in this set. Presumably the
# modules defining collective primitives register themselves here — not
# visible in this file chunk, TODO confirm.
multi_host_supported_collectives: Set[core.Primitive] = set()
PartitionsOrReplicated = Optional[Tuple[int, ...]]
def _find_partitions(jaxpr) -> Tuple[
Optional[Tuple[PartitionsOrReplicated, ...]],
Optional[Tuple[PartitionsOrReplicated, ...]],
int]:
"""Returns (in_partitions, out_partitions, num_partitions)."""
for eqn in jaxpr.eqns:
if eqn.primitive.name == "sharded_call":
if len(jaxpr.eqns) > 1:
raise NotImplementedError(
"pmap of sharded_jit + non-sharded operations not yet implemented.")
num_partitions = reconcile_num_partitions(eqn.params["call_jaxpr"],
eqn.params["num_partitions"])
return (eqn.params["in_parts"], eqn.params["out_parts_thunk"](),
num_partitions)
return None, None, 1
def reconcile_num_partitions(jaxpr, outer_num_parts: Optional[int]):
  """Returns the total number of partitions to use.

  Validates that any inner partitioning matches outer_num_parts if provided,
  and returns the partition count implied by outer_num_parts together with
  whatever partitioning is found inside `jaxpr`.
  """
  inner = _inner_partitions(jaxpr, outer_num_parts)  # also validates agreement
  if outer_num_parts is not None:
    return outer_num_parts
  # No outer count given: use the inner one, or 1 (fully replicated) if the
  # jaxpr specifies no partitioning anywhere either.
  return 1 if inner is None else inner
def _inner_partitions(jaxpr, expected_num_parts: Optional[int]):
  """Returns the total number of partitions from PartitionSpecs inside `jaxpr`.
  Also validates that this number matches `expected_num_parts` if provided.
  """
  for eqn in jaxpr.eqns:
    if eqn.primitive.name in ["sharding_constraint", "infeed"]:
      parts = eqn.params["partitions"]
      nparts = get_num_partitions(parts)
      # First partitioned equation seen fixes the expected count; later ones
      # must agree with it.
      if expected_num_parts is None:
        expected_num_parts = nparts
      elif nparts is not None and nparts != expected_num_parts:
        # TODO(skye): raise this error as we trace the jaxpr
        raise ValueError(
            f"with_sharding_constraint with partitions={parts} "
            f"(total partitions: {nparts}) doesn't match expected number of "
            f"partitions: {expected_num_parts}. If these partitions look "
            f"right, check outer sharded_jit and/or other "
            f"with_sharding_constraint calls.")
    else:
      # Recurse into any sub-jaxprs carried in the equation's params.
      for subjaxpr in core.jaxprs_in_params(eqn.params):
        expected_num_parts = _inner_partitions(subjaxpr, expected_num_parts)
  return expected_num_parts
def get_num_partitions(*partitions):
  """Returns the total partition count implied by `partitions`, or None.

  None means every spec is None, i.e. fully replicated. Raises ValueError if
  the specs disagree on the total number of partitions.
  """
  specs, _ = tree_flatten(partitions)
  if not specs:
    # Everything is specified as replicated (all Nones).
    return None
  counts = {onp.prod(spec) for spec in specs}
  if len(counts) > 1:
    raise ValueError(
        f"All partition specs must use the same number of total partitions, "
        f"got {partitions}, with distinct number of partitions "
        f"{counts} (the total number of partitions is the product "
        f"of a partition spec)")
  assert len(counts) == 1
  return counts.pop()
class ResultToPopulate:
  """Sentinel type marking an output-buffer slot that has not been filled yet."""

# Singleton sentinel instance used by the results handlers.
result_to_populate = ResultToPopulate()
def _pvals_to_results_handler(
    size, nrep, npart,
    out_parts: Optional[Tuple[PartitionsOrReplicated, ...]],
    out_pvals, devices, backend):
  """Builds a function turning per-shard output buffers into final results.

  One result handler is constructed per output pval; the returned `handler`
  transposes the [shard][output] buffer layout into [output][shard] and feeds
  each output's buffers to its handler.
  """
  num_outputs = len(out_pvals)
  if out_parts is None:
    out_parts = (None,) * len(out_pvals)
  per_out_handlers = [
      _pval_to_result_handler(size, nrep, npart, parts, pval, devices, backend)
      for pval, parts in safe_zip(out_pvals, out_parts)
  ]

  def handler(out_bufs):
    num_shards = nrep * npart
    assert num_shards == len(out_bufs)
    # Transpose: regroup the buffers by output rather than by shard.
    grouped = [[result_to_populate] * num_shards for _ in range(num_outputs)]
    for shard_idx, shard_outputs in enumerate(out_bufs):
      for out_idx, buf in enumerate(shard_outputs):
        grouped[out_idx][shard_idx] = buf
    assert not any(buf is result_to_populate for bufs in grouped
                   for buf in bufs)
    return [h(bufs) for h, bufs in zip(per_out_handlers, grouped)]

  return handler
def replicate(val, axis_size, nrep, devices=None, backend=None):
  """Replicates ``val`` across multiple devices.

  Args:
    val: the value to be replicated.
    axis_size: the length of the output, i.e. the logical number of replicas to
    create. Usually equal to `nrep`, but in the case of nested pmaps, `nrep` may
    be a multiple of `axis_size`.
    nrep: the number of replicas to create. If ``devices`` is set, must be equal
      to ``len(devices)``.
    devices: the devices to replicate across. If None, ``nrep`` will be used to
      generate a default device assignment.
    backend: string specifying which backend to use.

  Returns:
    A ShardedDeviceArray of length `axis_size` where each shard is equal to
    ``val``.
  """
  device_count = (len(devices) if devices else xb.local_device_count())
  if nrep > device_count:
    msg = ("Cannot replicate across %d replicas because only %d local devices "
           "are available." % (nrep, device_count))
    if devices:
      # (The previous version carried a dead `if devices else str(None)`
      # ternary here; inside this branch `devices` is always truthy.)
      msg += " (local devices = %s)" % ", ".join(map(str, devices))
    raise ValueError(msg)
  if devices is None:
    assert nrep is not None
    # TODO(skye): use different device assignment on multihost
    devices = xb.get_backend(backend).get_default_device_assignment(nrep)
  assert nrep == len(devices)
  aval = xla.abstractify(val)  # type: ShapedArray
  replicated_aval = ShapedArray((axis_size,) + aval.shape, aval.dtype)
  # TODO(skye): figure out how partitioning should work here
  sharding_spec = _pmap_sharding_spec(nrep, axis_size, 1, None, aval, True)
  device_buffers = [xla.device_put(val, d) for d in devices]
  return ShardedDeviceArray(replicated_aval, sharding_spec, device_buffers)
def _pval_to_result_handler(axis_size, nrep, npart, parts, pval, devices, backend):
  """Returns a function mapping raw output buffers to one final result.

  For a known (constant) pval the buffers are ignored and a broadcast constant
  is returned; otherwise the work is deferred to aval_to_result_handler.
  """
  if devices:
    assert all(d.host_id == xb.host_id(backend) for d in devices)
  pv, const = pval
  if pv is None:
    if nrep is None:
      nrep = axis_size
      # If 'const' is a ShardedDeviceArray, it must have come from a pmap nested
      # inside the one we're currently evaluating, and we should replicate
      # 'const' across the total number of devices needed. We don't necessarily
      # know the nested pmap's axis_size (e.g. the jaxpr for
      # pmap(pmap(lambda x: 3)) is trivial, with no pmaps), but we can use the
      # axis size of the output 'const'.
      # TODO: we might be doing unnecessary device transfers in the inner pmap.
      if isinstance(const, ShardedDeviceArray):
        nrep *= len(const)
    bcast_const = (core.unit if const is core.unit
                   else replicate(const, axis_size, nrep, devices, backend))
    return lambda _: bcast_const
  else:
    if pv is not core.abstract_unit:
      unsharded_aval = ShapedArray((axis_size,) + pv.shape, pv.dtype)
      sharding_spec = _pmap_sharding_spec(nrep, axis_size, npart, parts, pv,
                                          True)
      indices = spec_to_indices(unsharded_aval.shape, sharding_spec)
    else:
      # Units carry no data, so no sharding spec or indices are needed.
      sharding_spec = indices = None
      unsharded_aval = pv
    return aval_to_result_handler(sharding_spec, indices, unsharded_aval)
def _pmap_sharding_spec(nrep, axis_size, npart, parts, sharded_aval, mapped):
  """Sharding spec for arguments or results of a pmap.
  Args:
    nrep: number of local XLA replicas (product of local axis sizes)
    axis_size: local axis size for outer pmap
    npart: total number of XLA partitions (required by sharded_jit calls)
    parts: the partitioning of the value or None
    sharded_aval: the aval of the value inside the outer pmap
    mapped: whether the value is mapped in the outer pmap
  Returns:
    A ShardingSpec.
  """
  if sharded_aval is core.abstract_unit:
    return None
  # nrep must be an exact multiple of axis_size; the quotient is replication
  # contributed by inner pmaps.
  replication_factor, ragged = divmod(nrep, axis_size)
  assert not ragged
  # get the sharding spec from inner sharded_jits as if we weren't in a pmap
  shard_spec = partitioned_sharding_spec(npart, parts, sharded_aval)
  assert shard_spec is not None # hint for pytype
  if mapped:
    # replication_factor represents the product of inner pmaps, so it goes
    # after the outer pmapped axis at index 0
    replication_factors = [] if replication_factor == 1 else [(replication_factor, 1)]
    replication_factors.extend((factor, index + 1) for factor, index
                               in shard_spec.replication_factors)
    return ShardingSpec(
        shards_per_axis=(axis_size,) + shard_spec.shards_per_axis,
        is_axis_materialized=(False,) + shard_spec.is_axis_materialized,
        replication_factors=replication_factors)
  else:
    # Unmapped: the value isn't split over the pmapped axis, so the whole
    # outer axis contributes replication at index 0 instead.
    return ShardingSpec(
        shards_per_axis=shard_spec.shards_per_axis,
        is_axis_materialized=shard_spec.is_axis_materialized,
        replication_factors=[(replication_factor * axis_size, 0)] +
                            shard_spec.replication_factors)
def partitioned_sharding_spec(num_partitions: int,
                              partitions: Optional[Sequence[int]], aval):
  """Returns a ShardingSpec for a sharded_jit-partitioned value (or None).

  `partitions` of None means the value is unpartitioned; it is then replicated
  `num_partitions` times, except that with a single partition the singleton
  replication factor is dropped entirely.
  """
  if aval is core.abstract_unit:
    return None
  ndim = len(aval.shape)
  materialized = (True,) * ndim
  if partitions is not None:
    assert len(partitions) == ndim
    return ShardingSpec(shards_per_axis=tuple(partitions),
                        is_axis_materialized=materialized,
                        replication_factors=[])
  # Unpartitioned: hit by both replicated sharded_jit and no sharded_jit. We
  # drop the extra singleton replication factor in the latter case; where the
  # replication is placed doesn't matter since every shards_per_axis entry is 1.
  replication = [] if num_partitions == 1 else [(num_partitions, 0)]
  return ShardingSpec(shards_per_axis=(1,) * ndim,
                      is_axis_materialized=materialized,
                      replication_factors=replication)
def execute_replicated(compiled,
                       uses_outfeed, backend, in_handler, out_handler, *args):
  """Shards `args`, runs `compiled` on all local devices, handles the outputs."""
  # Outfeed-using computations must have a listener attached before launch.
  xla.check_before_outfeed_execution(uses_outfeed)
  sharded_inputs = in_handler(args)
  raw_outputs = compiled.execute_on_local_devices(list(sharded_inputs))
  return out_handler(raw_outputs)
# The pmap primitive: a multi-result map primitive bound via core.map_bind and
# implemented by compiling/running the mapped function (xla_pmap_impl).
xla_pmap_p = core.Primitive('xla_pmap')
xla_pmap_p.map_primitive = True
xla_pmap_p.multiple_results = True
xla_pmap = partial(core.map_bind, xla_pmap_p)
xla_pmap_p.def_custom_bind(xla_pmap)
xla_pmap_p.def_impl(xla_pmap_impl)
# Mark pmap as staged out by partial evaluation rather than traced through.
pe.staged_out_calls.add(xla_pmap_p)
def _pmap_translation_rule(c, axis_env,
                           in_nodes, name_stack, axis_name, axis_size,
                           global_axis_size, devices, name,
                           call_jaxpr, *, backend=None, mapped_invars,
                           donated_invars):
  """XLA translation for a pmap nested inside a jit or another pmap.

  The body is inlined: mapped inputs are sharded along the new axis, the jaxpr
  is lowered under the extended axis env, and outputs are unsharded again.

  Raises:
    ValueError: if buffers are donated, or a nested pmap specifies devices.
  """
  if any(donated_invars):
    # Fixed typo in the message: "a a pmap" -> "a pmap".
    raise ValueError("Donating buffers passed to a pmap nested inside a jit "
                     "or another pmap is not supported.")
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  if axis_env.names and devices is not None:
    raise ValueError("Nested pmap with explicit devices argument.")
  if global_axis_size is None:
    global_axis_size = axis_size
  new_env = xla.extend_axis_env(axis_env, axis_name, global_axis_size)
  # Shard the in_nodes that are mapped
  in_avals = [v.aval for v in call_jaxpr.invars]
  in_nodes_sharded = (
    _xla_shard(c, aval, new_env, in_node) if in_node_mapped else in_node
    for aval, in_node, in_node_mapped in zip(in_avals, in_nodes, mapped_invars))
  sharded_outs = xla.jaxpr_subcomp(
      c, call_jaxpr, backend, new_env, (),
      extend_name_stack(name_stack, wrap_name(name, 'pmap')), *in_nodes_sharded)
  out_avals = [v.aval for v in call_jaxpr.outvars]
  outs = [_xla_unshard(c, aval, new_env, shard, backend=backend)
          for aval, shard in zip(out_avals, sharded_outs)]
  return xops.Tuple(c, outs)
# Register the XLA lowering and autodiff transpose rules for the pmap primitive.
xla.call_translations[xla_pmap_p] = _pmap_translation_rule
ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)
def _xla_shard(c, aval, axis_env, x):
  """Emits XLA ops slicing out this replica's chunk of `x` along axis 0."""
  if aval is core.abstract_unit:
    return x
  elif isinstance(aval, ShapedArray):
    dims = list(c.get_shape(x).dimensions())
    zero = xb.constant(c, onp.zeros((), dtype=onp.uint32))
    # Dynamic-slice a single row at this replica's index along the innermost
    # pmapped axis, then drop the leading singleton dimension.
    idxs = [_unravel_index(c, axis_env)] + [zero] * (len(dims) - 1)
    return xops.Reshape(xops.DynamicSlice(x, idxs, [1] + dims[1:]), dims[1:])
  else:
    raise TypeError((aval, c.get_shape(x)))
# TODO(b/110096942): more efficient gather
def _xla_unshard(c, aval, axis_env, x, backend):
  """Emits XLA ops gathering per-replica shards back into the full array.

  Each replica writes its shard into a zero-padded buffer at its own index;
  a cross-replica sum then combines the buffers into the unsharded result.
  """
  if aval is core.abstract_unit:
    return x
  elif isinstance(aval, ShapedArray):
    # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
    convert_bool = (onp.issubdtype(aval.dtype, onp.bool_)
                    and xb.get_backend(backend).platform in ('cpu', 'gpu'))
    if convert_bool:
      x = xops.ConvertElementType(x, xb.dtype_to_etype(onp.float32))
    xla_shape = c.get_shape(x)
    dims = list(xla_shape.dimensions())
    padded = xops.Broadcast(xb.constant(c, onp.array(0, xla_shape.numpy_dtype())),
                            [axis_env.sizes[-1]] + dims)
    zero = xb.constant(c, onp.zeros((), dtype=onp.uint32))
    idxs = [_unravel_index(c, axis_env)] + [zero] * len(dims)
    padded = xops.DynamicUpdateSlice(padded, xops.Reshape(x, [1] + dims), idxs)
    replica_groups_protos = xc.make_replica_groups(
      xla.axis_groups(axis_env, axis_env.names[-1]))
    # Summing the zero-padded buffers concatenates the shards logically.
    out = xops.CrossReplicaSum(padded, replica_groups_protos)
    # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
    if convert_bool:
      nonzero = xops.Ne(out, xb.constant(c, onp.array(0, dtype=onp.float32)))
      out = xops.ConvertElementType(nonzero, xb.dtype_to_etype(onp.bool_))
    return out
  else:
    raise TypeError((aval, c.get_shape(x)))
def _unravel_index(c, axis_env):
  """Emits XLA computing this replica's index along the innermost axis."""
  div = xb.constant(c, onp.array(axis_env.nreps // prod(axis_env.sizes), onp.uint32))
  mod = xb.constant(c, onp.array(axis_env.sizes[-1], onp.uint32))
  return xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)
### soft_pmap axis split transformation
# To allow pmap to map over logical axes larger than the number of XLA devices
# available, we use a transformation that effectively simulates having more
# devices in software. The strategy is to split the mapped axis into two axes,
# one to be hardware-mapped and the other to be software-mapped. Thus the
# transformation rewrites the function to be mapped so that it accepts a new
# leading axis (the software-mapped axis), and so that collectives in the
# original function correspond to both device-local operations and collective
# communication operations across hardware devices that implement the original
# logical semantics.
@lu.transformation
def split_axis(axis_name, chunk_size, *args):
  """Transformation running the wrapped fun under a SplitAxisTrace.

  Each argument is wrapped as a SplitAxisTracer carrying `axis_name`; outputs
  that come back unmapped are broadcast to length `chunk_size` so every output
  has the soft-mapped leading axis.
  """
  with core.new_master(SplitAxisTrace) as master:
    trace = SplitAxisTrace(master, core.cur_sublevel())
    in_tracers = list(map(partial(SplitAxisTracer, trace, axis_name), args))
    with add_chunk_to_axis_env(axis_name, trace, chunk_size):
      outs = yield in_tracers, {}
    out_tracers = list(map(trace.full_raise, outs))
    out_vals, out_names = unzip2((t.val, t.axis_name) for t in out_tracers)
    del master, out_tracers  # drop tracer refs before leaving the trace scope
  out_vals = [broadcast(x, chunk_size, 0) if d is not_mapped else x
              for x, d in zip(out_vals, out_names)]
  yield out_vals
@lu.transformation_with_aux
def split_axis_subtrace(master, names, *vals):
  """Resumes a split-axis trace inside a call/map primitive.

  The auxiliary output is the per-output axis-name list (not_mapped entries
  mark outputs without the soft-mapped axis).
  """
  trace = SplitAxisTrace(master, core.cur_sublevel())
  outs = yield list(map(partial(SplitAxisTracer, trace), names, vals)), {}
  out_tracers = list(map(trace.full_raise, outs))
  out_vals, out_names = unzip2((t.val, t.axis_name) for t in out_tracers)
  yield out_vals, out_names
@contextmanager
def add_chunk_to_axis_env(axis_name, soft_trace, soft_size):
  """Temporarily attaches a soft (software-mapped) chunk to `axis_name`.

  Sets the soft_trace/soft_size fields of the dynamic axis env entry for
  `axis_name` for the duration of the `with` block, and always clears them on
  exit — previously a body exception would skip the cleanup and leak
  soft-mapping state into later traces on this thread.
  """
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  dynamic_axis_env[axis_name].soft_trace = soft_trace
  dynamic_axis_env[axis_name].soft_size = soft_size
  try:
    yield
  finally:
    dynamic_axis_env[axis_name].soft_trace = None
    dynamic_axis_env[axis_name].soft_size = None
class SplitAxisTracer(core.Tracer):
  """Tracer carrying the name of the softly-mapped axis (or not_mapped)."""
  def __init__(self, trace, axis_name, val):
    self._trace = trace
    self.axis_name = axis_name  # an axis name, or the not_mapped sentinel
    self.val = val
  @property
  def aval(self):
    # Mapped tracers hide the leading (soft-mapped) axis from their aval.
    aval = raise_to_shaped(core.get_aval(self.val))
    if self.axis_name is not_mapped:
      return aval
    else:
      assert isinstance(aval, ShapedArray)
      return ShapedArray(aval.shape[1:], aval.dtype)
  def full_lower(self):
    # Unmapped tracers are transparent and can be lowered to their value.
    if self.axis_name is not_mapped:
      return core.full_lower(self.val)
    else:
      return self
class SplitAxisTrace(core.Trace):
  """Trace implementing the soft-pmap axis-split transformation.

  Collectives over the split axis are handled by split_axis_rules; all other
  primitives are treated like batching over the soft-mapped leading axis.
  """
  def pure(self, val):
    return SplitAxisTracer(self, not_mapped, val)
  def lift(self, val):
    return SplitAxisTracer(self, not_mapped, val)
  def sublift(self, val):
    return SplitAxisTracer(self, val.axis_name, val.val)
  def process_primitive(self, primitive, tracers, params):
    vals_in, names_in = unzip2((t.val, t.axis_name) for t in tracers)
    if primitive is axis_index_p:
      # axis_index: combine the hardware index with the per-chunk offsets to
      # recover logical indices along the full (split) axis.
      dummy, = vals_in
      hard_idx = primitive.bind(dummy, **params)
      val_out = hard_idx * params['soft_size'] + onp.arange(params['soft_size'])
      return SplitAxisTracer(self, params['axis_name'], val_out)
    elif all(axis_name is not_mapped for axis_name in names_in):
      return primitive.bind(*vals_in, **params)
    else:
      name, = set(n for n in names_in if n is not not_mapped)
      if primitive in xla.parallel_translations:
        # if it's a pmap collective primitive, do something special
        if name == params['axis_name']:
          # if the name matches this tracer's name, apply the split_axis rule
          try:
            rule = split_axis_rules[primitive]
          except KeyError as err:
            msg = "split_axis for {} not implemented. Open a feature request!"
            raise NotImplementedError(msg.format(primitive)) from err
          which_mapped = [n is not not_mapped for n in names_in]
          val_out, is_mapped = rule(vals_in, which_mapped, **params)
          name_out = name if is_mapped else not_mapped
          if primitive.multiple_results:
            return [SplitAxisTracer(self, name_out, v) for v in val_out]
          else:
            return SplitAxisTracer(self, name_out, val_out)
        else:
          # if not, bind the primitive without any processing
          val_out = primitive.bind(*vals_in, **params)
          if primitive.multiple_results:
            return [SplitAxisTracer(self, name, v) for v in val_out]
          else:
            return SplitAxisTracer(self, name, val_out)
      else:
        # if it's not a pmap collective primitive, act just like batching
        rule = batching.get_primitive_batcher(primitive)
        axes_in = [n if n is not_mapped else 0 for n in names_in]
        val_out, axis_out = rule(vals_in, axes_in, **params)
        def new_tracer(x, a):
          # Move the batched axis back to position 0 for mapped outputs.
          if a is not_mapped:
            return SplitAxisTracer(self, not_mapped, x)
          else:
            return SplitAxisTracer(self, name, batching.moveaxis(x, a, 0))
        if primitive.multiple_results:
          return [new_tracer(x, a) for x, a in zip(val_out, axis_out)]
        else:
          return new_tracer(val_out, axis_out)
  def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):
    # Continue the split-axis trace inside the called function when any input
    # is soft-mapped; otherwise bind directly.
    assert call_primitive.multiple_results
    vals, names = unzip2((t.val, t.axis_name) for t in tracers)
    if all(name is not_mapped for name in names):
      return call_primitive.bind(f, *vals, **params)
    else:
      f, names_out = split_axis_subtrace(f, self.master, names)
      vals_out = call_primitive.bind(f, *vals, **params)
      return [SplitAxisTracer(self, a, x) for a, x in zip(names_out(), vals_out)]
  def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):
    vals, names = unzip2((t.val, t.axis_name) for t in tracers)
    if all(name is not_mapped for name in names):
      return map_primitive.bind(f, *vals, **params)
    else:
      # because the map primitive maps over leading axes, we need to transpose
      # the software-mapped axis on any mapped arguments to be the second axis;
      # then we call the map primitive and resume the trace under the call
      vals_trans = [batching.moveaxis(x, 0, 1) if d is not not_mapped else x
                    for x, d in zip(vals, names)]
      f, names_out = split_axis_subtrace(f, self.master, names)
      vals_out_trans = map_primitive.bind(f, *vals_trans, **params)
      vals_out = [batching.moveaxis(x, 1, 0) if d is not not_mapped else x
                  for x, d in zip(vals_out_trans, names_out())]
      return [SplitAxisTracer(self, a, x) for a, x in zip(names_out(), vals_out)]
  def post_process_call(self, call_primitive, out_tracer, params):
    # Defer re-wrapping the output until the caller's trace is re-entered.
    val, name = out_tracer.val, out_tracer.axis_name
    master = self.master
    def todo(x):
      trace = SplitAxisTrace(master, core.cur_sublevel())
      return SplitAxisTracer(trace, name, x)
    return val, todo
  post_process_map = post_process_call
# Collective primitive -> split_axis rule, consulted by
# SplitAxisTrace.process_primitive; a missing entry raises NotImplementedError.
split_axis_rules: Dict[core.Primitive, Callable] = {}
| 42.647145 | 93 | 0.707412 |
7954a603f16e7f1d72ec5c154e8b128d7ea2a352 | 365 | py | Python | curated/users/urls.py | mahmoudtokura/Curated2.0 | cfd60673439d7d4f9c4fc4f53e0c7c6a69b271b3 | [
"MIT"
] | null | null | null | curated/users/urls.py | mahmoudtokura/Curated2.0 | cfd60673439d7d4f9c4fc4f53e0c7c6a69b271b3 | [
"MIT"
] | null | null | null | curated/users/urls.py | mahmoudtokura/Curated2.0 | cfd60673439d7d4f9c4fc4f53e0c7c6a69b271b3 | [
"MIT"
] | null | null | null | from django.urls import path
from curated.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
# URL namespace: these routes reverse as "users:<name>".
app_name = "users"
urlpatterns = [
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    # Catch-all username route; keep it last so it doesn't shadow the
    # literal "~redirect/" and "~update/" paths above.
    path("<str:username>/", view=user_detail_view, name="detail"),
]
| 24.333333 | 66 | 0.70137 |
7954a652c90c7d7e684263bc85d0121a6238ddfb | 149 | py | Python | lane_finding_pipeline/piplineinterface.py | kerrywang/Advanced-Lane-Finding | 146fd169b9c9a0c58d2bd6103e147fdc4a26684d | [
"MIT"
] | null | null | null | lane_finding_pipeline/piplineinterface.py | kerrywang/Advanced-Lane-Finding | 146fd169b9c9a0c58d2bd6103e147fdc4a26684d | [
"MIT"
] | 17 | 2020-01-28T22:46:24.000Z | 2022-03-11T23:48:39.000Z | lane_finding_pipeline/piplineinterface.py | kerrywang/Advanced-Lane-Finding | 146fd169b9c9a0c58d2bd6103e147fdc4a26684d | [
"MIT"
] | null | null | null | import abc
class PipeLineInterface(abc.ABC):
    """Abstract base class for a single lane-finding pipeline stage.

    Subclasses must implement `process`. (This also removes dataset residue
    that had been fused onto the final line of the original definition.)
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def process(self, image):
        """Transforms `image` and returns this stage's output."""
        pass
7954a6f1fcc27bc7a2588495426c5968a5f1d4f2 | 134 | py | Python | tests/pydecompile-test/baselines/class_handler_parameter.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 1 | 2020-11-11T01:47:26.000Z | 2020-11-11T01:47:26.000Z | tests/pydecompile-test/baselines/class_handler_parameter.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 3 | 2019-08-13T14:36:05.000Z | 2020-01-18T00:04:19.000Z | tests/pydecompile-test/baselines/class_handler_parameter.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 2 | 2019-10-29T06:56:11.000Z | 2021-05-25T10:18:12.000Z | #/ <reference path="./testBlocks/classHandlerParameter.ts" />
x = game.Sprite()

def function_0(other):
    # Overlap handler receiving the other sprite; intentionally empty in this
    # decompiler baseline. (Dataset residue fused onto the final statement of
    # the original has been removed.)
    pass

x.onOverlap(function_0)
7954a74be5c915c47cfec2ac5007878e721b3a54 | 886 | py | Python | tests/bracket.py | TheSecEng/MarkdownTOC | 7c69137249820dc586fd90b58fca2f34c54f2abc | [
"MIT"
] | null | null | null | tests/bracket.py | TheSecEng/MarkdownTOC | 7c69137249820dc586fd90b58fca2f34c54f2abc | [
"MIT"
] | 1 | 2020-05-24T11:44:49.000Z | 2020-05-24T11:44:49.000Z | tests/bracket.py | TheSecEng/MarkdownTOC | 7c69137249820dc586fd90b58fca2f34c54f2abc | [
"MIT"
] | null | null | null | # coding:utf-8
from base import TestBase
class TestBracket(TestBase):
    '''Test for attributes \'bracket\''''

    # for debug
    # def tearDown(self):
    #     pass

    # TODO: How can we remove 'autolink=true' only in these tests below ?
    bracket_text = \
'''
<!-- MarkdownTOC autolink=true {0} -->
<!-- /MarkdownTOC -->
# foo bar
'''

    def test_bracket_default(self):
        '''Default Bracket is round'''
        toc = self.init_update(self.bracket_text.format(''))['toc']
        self.assert_In('- [foo bar](#foo-bar)', toc)

    def test_bracket_square(self):
        # bracket=square produces reference-style links.
        toc = self.init_update(self.bracket_text.format('bracket=square'))['toc']
        self.assert_In('- [foo bar][foo-bar]', toc)

    def test_bracket_round(self):
        # bracket=round produces inline links, same as the default.
        toc = self.init_update(self.bracket_text.format('bracket=round'))['toc']
        self.assert_In('- [foo bar](#foo-bar)', toc)
7954a7bbe8ccac9a9d76513832ed91b4c1c715ad | 3,075 | py | Python | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | import onegov.core
import onegov.org
from tests.shared import utils
def test_view_permissions():
    """Every view in onegov.org must declare explicit permissions."""
    utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp)
def test_notfound(client):
    """An unknown path renders the (German) 404 page with status 404."""
    notfound_page = client.get('/foobar', expect_errors=True)
    assert "Seite nicht gefunden" in notfound_page
    assert notfound_page.status_code == 404
def test_links(client):
    """An admin-created external link shows up publicly and redirects (302)."""
    root_url = client.get('/').pyquery('.side-navigation a').attr('href')
    client.login_admin()
    root_page = client.get(root_url)
    new_link = root_page.click("Verknüpfung")
    assert "Neue Verknüpfung" in new_link
    new_link.form['title'] = 'Google'
    new_link.form['url'] = 'https://www.google.ch'
    link = new_link.form.submit().follow()
    # The link detail page shows a manual-redirect hint plus the target URL.
    assert "Sie wurden nicht automatisch weitergeleitet" in link
    assert 'https://www.google.ch' in link
    client.get('/auth/logout')
    # Anonymous visitors are redirected straight to the external target.
    root_page = client.get(root_url)
    assert "Google" in root_page
    google = root_page.click("Google", index=0)
    assert google.status_code == 302
    assert google.location == 'https://www.google.ch'
def test_clipboard(client):
    """Copying a topic offers a paste action that duplicates it in place."""
    client.login_admin()
    page = client.get('/topics/organisation')
    assert 'paste-link' not in page
    page = page.click(
        'Kopieren',
        extra_environ={'HTTP_REFERER': page.request.url}
    ).follow()
    assert 'paste-link' in page
    # Pasting creates a copy nested under the original topic.
    page = page.click('Einf').form.submit().follow()
    assert '/organisation/organisation' in page.request.url
def test_clipboard_separation(client):
    """The copy/paste clipboard is scoped per browser session, not per user."""
    client.login_admin()
    page = client.get('/topics/organisation')
    page = page.click('Kopieren')
    assert 'paste-link' in client.get('/topics/organisation')
    # new client (browser) -> new clipboard
    client = client.spawn()
    client.login_admin()
    assert 'paste-link' not in client.get('/topics/organisation')
def test_gobal_tools(client):
    """Global tools are hidden for anonymous visitors and shown after login."""
    # NOTE(review): "gobal" looks like a typo for "global"; the name is kept
    # because renaming would change the collected test id.
    anonymous_links = client.get('/').pyquery('.globals a')
    assert anonymous_links == []

    client.login_admin()
    admin_links = client.get('/').pyquery('.globals a')
    assert admin_links != []
def test_top_navigation(client):
    """The side navigation lists the four default top-level pages in order."""
    nav_labels = client.get('/').pyquery('.side-navigation a span')
    assert nav_labels.text() == 'Organisation Themen Kontakt Aktuelles'
def test_announcement(client):
    """Admin-configured announcements render with the chosen colors."""
    client.login_admin()

    font_color = '#006fbb'
    background = '#008263'
    message = 'This is an announcement which appears on top of the page'

    settings_page = client.get('/header-settings')
    form = settings_page.form

    # The form is pre-populated with the default announcement colors.
    assert form['left_header_announcement_bg_color'].value == '#FBBC05'
    assert form['left_header_announcement_font_color'].value == '#000000'

    form['left_header_announcement'] = message
    form['left_header_announcement_bg_color'] = background
    form['left_header_announcement_font_color'] = font_color
    result = form.submit().follow()

    assert message in result
    expected_div = (
        f'<div id="announcement" style="color: {font_color}; '
        f'background-color: {background};">'
    )
    assert expected_div in result
| 27.212389 | 74 | 0.67935 |
7954a91cf63da0df240ed0cf80784259f904d666 | 1,028 | py | Python | GCE/sources/GCEResourceLimits.py | shreyb/decisionengine_modules | 6658c623a79fc66b45010f464770b0cb613bf754 | [
"BSD-3-Clause"
] | null | null | null | GCE/sources/GCEResourceLimits.py | shreyb/decisionengine_modules | 6658c623a79fc66b45010f464770b0cb613bf754 | [
"BSD-3-Clause"
] | null | null | null | GCE/sources/GCEResourceLimits.py | shreyb/decisionengine_modules | 6658c623a79fc66b45010f464770b0cb613bf754 | [
"BSD-3-Clause"
] | null | null | null | """
Query Resource Limits from another channel with the factory source
"""
import typing
from decisionengine.framework.modules import Source, SourceProxy
from decisionengine.framework.modules.Source import Parameter
@Source.supports_config(Parameter('entry_limit_attrs', type=list))
@Source.produces(GCE_Resource_Limits=typing.Any)
class GCEResourceLimits(SourceProxy.SourceProxy):
    """
    Source proxy that consumes factory data and extracts GCE entry limits.

    The ``entry_limit_attrs`` config entry lists the factory columns to keep.
    """
    def __init__(self, config):
        # config: dict-like; 'entry_limit_attrs' may be absent (-> None).
        super().__init__(config)
        self.entry_limit_attrs = config.get('entry_limit_attrs')
    def acquire(self):
        """
        Acquire google factory entry limits from the source proxy and return
        the selected columns keyed under 'GCE_Resource_Limits'.
        :rtype: dict mapping str to :obj:`~pd.DataFrame`
        """
        factory_data = super().acquire()
        # self.data_keys is presumably populated by the SourceProxy base
        # class from the config — TODO confirm; only the first key is used.
        df_factory_data = factory_data.get(self.data_keys[0])
        # NOTE(review): if the key is missing, .get() returns None and the
        # next line raises TypeError rather than a descriptive error.
        df_entry_limits = df_factory_data[self.entry_limit_attrs]
        return {'GCE_Resource_Limits': df_entry_limits}
Source.describe(GCEResourceLimits)
| 29.371429 | 66 | 0.718872 |
7954a9969c943ecfedc5c3e18e81f213fe60dd00 | 1,447 | py | Python | examples/example3.py | mjbright/kubernetes-client-python | a677ac36b1cc7bad43dc9f67195d05961d68b339 | [
"Apache-2.0"
] | null | null | null | examples/example3.py | mjbright/kubernetes-client-python | a677ac36b1cc7bad43dc9f67195d05961d68b339 | [
"Apache-2.0"
] | null | null | null | examples/example3.py | mjbright/kubernetes-client-python | a677ac36b1cc7bad43dc9f67195d05961d68b339 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kubernetes import client, config
def main():
    """Print every API group the cluster supports, marking preferred versions.

    Loads the kube config from its default location, then lists the core API
    versions followed by each discovered API group.
    """
    config.load_kube_config()

    print("Supported APIs (* is preferred version):")
    core_versions = client.CoreApi().get_api_versions().versions
    print("%-20s %s" % ("core", ",".join(core_versions)))

    for group in client.ApisApi().get_api_versions().groups:
        preferred = group.preferred_version.version
        labels = []
        for candidate in group.versions:
            # Star the preferred version, but only when there is a choice.
            prefix = "*" if (
                candidate.version == preferred and len(group.versions) > 1
            ) else ""
            labels.append(prefix + candidate.version)
        print("%-40s %s" % (group.name, ",".join(labels)))
# Allow running this example directly as a script.
if __name__ == '__main__':
    main()
| 33.651163 | 75 | 0.656531 |
7954ab3057fde6252fc6ff2cf7d65d85beb0cd8d | 4,995 | py | Python | app/django/photo/admin.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | app/django/photo/admin.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | app/django/photo/admin.py | Murabei-OpenSource-Codes/ai-photo-sampler--backend | c098a5cb544da89623a000d87daa18f22cfecfce | [
"BSD-3-Clause"
] | null | null | null | import base64
import datetime
from django.utils import timezone
from django.contrib import admin
from django.db.models import Q
from photo.models import DescriptionImage
from django.utils.html import format_html
from core.singletons import storage_object
from django.contrib.admin import SimpleListFilter
from django.contrib import messages
from flat_json_widget.widgets import FlatJsonWidget
from django import forms
class EmptyFileFilter(SimpleListFilter):
    """Admin list filter splitting images with an uploaded file from empty ones."""

    title = 'With file'
    parameter_name = 'with_files'

    def lookups(self, request, model_admin):
        # (query-string value, human readable label)
        return [("yes", "yes"), ("no", "no")]

    def queryset(self, request, queryset):
        # A FileField is "empty" when it is either NULL or the empty string.
        empty = Q(file='') | Q(file=None)
        choice = self.value()
        if choice == "yes":
            return queryset.exclude(empty)
        if choice == "no":
            return queryset.filter(empty)
        return queryset
class DescriptionImageForm(forms.ModelForm):
    """Admin form for DescriptionImage.

    Renders the two free-form JSON fields with a key/value widget instead of
    a raw textarea.
    """
    class Meta:
        widgets = {
            'dimensions': FlatJsonWidget,
            'extra_info': FlatJsonWidget,
        }
class DescriptionImageAdmin(admin.ModelAdmin):
    """Admin for DescriptionImage with team scoping, thumbnails and bulk actions.

    Non-superusers only see images of their own team (or teamless images),
    and can only add images if they belong to a team.
    """
    list_display = [
        'id', 'app_label', 'description', 'team',
        'image_created_at', 'with_file', 'inactive']
    list_display_links = list_display
    search_fields = ['app_label', 'description']
    readonly_fields = [
        'app_label', 'thumbnail', 'image_created_at', 'image_uploaded_at',
        'image_created_by', 'hash', 'obj_created_at', 'obj_created_by',
        'team']
    list_filter = [EmptyFileFilter, 'image_created_at', 'team', 'inactive']
    list_per_page = 30
    inlines = []
    form = DescriptionImageForm
    # NOTE(review): "Dimentions" below is a user-facing typo for "Dimensions".
    fieldsets = [
        ['General Information', {
            'fields': (
                "app_label", "description", "notes", "team",
                "obj_created_at", "obj_created_by", "inactive")}],
        ['Image', {
            'fields': (
                "file", "thumbnail", "image_created_at",
                "image_uploaded_at", "image_created_by")}],
        ['Dimentions/Extra Info.', {
            'fields': (
                "dimensions", "extra_info",)}],
    ]
    def save_model(self, request, obj, form, change):
        """Stamp creator/team on first save and image metadata on upload."""
        if obj.id is None:
            obj.obj_created_by = request.user
            obj.obj_created_at = timezone.now()
            # NOTE(review): accessed as user.experiment_team here but as
            # user.profile.experiment_team in get_queryset — confirm which
            # attribute path is correct for this user model.
            obj.team_id = request.user.experiment_team.team_id
        if 'file' in form.changed_data:
            temp_time = timezone.now()
            obj.image_created_at = temp_time
            obj.image_uploaded_at = temp_time
            obj.image_created_by = request.user
        obj.save()
    def thumbnail(self, obj):
        """Render the stored file inline as a base64 <img>, or '' if empty."""
        if obj.file.name:
            template = (
                "<img src='data:image/jpeg;base64,{base}' "
                "style='width: 100%;'/>")
            results = storage_object.read_file(obj.file.name)["data"]
            base64_encoded_data = base64.b64encode(results)
            image64 = base64_encoded_data.decode('utf-8')
            html = format_html(template.format(base=image64))
            return html
        else:
            return ""
    thumbnail.allow_tags = True
    thumbnail.__name__ = 'Thumbnail'
    def with_file(self, obj):
        # Boolean column for list_display: True when a file name is stored.
        return obj.file.name != ""
    def get_queryset(self, request):
        """Superusers see everything; others see their team's or teamless rows."""
        qs = super(DescriptionImageAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(
            Q(team=request.user.profile.experiment_team.team_id) | Q(team__isnull=True))
    def has_add_permission(self, request, obj=None):
        # Only users attached to a team may create images.
        if request.user.experiment_team.team_id is None:
            return False
        else:
            return True
    @admin.action(description='Remove images from objects')
    def remove_file(self, request, queryset):
        """Admin action to remove files from objects."""
        # NOTE(review): file__isnull=False also matches empty-string files;
        # confirm remove_image_file() tolerates an empty FileField.
        not_null_images = list(queryset.filter(file__isnull=False))
        for obj in not_null_images:
            obj.remove_image_file()
        self.message_user(
            request, '%d file(s) removed.' % len(not_null_images),
            messages.SUCCESS)
    @admin.action(description='Toggle inactive')
    def set_inactive(self, request, queryset):
        """Admin action flipping the inactive flag of every selected row."""
        # Snapshot ids first so the two updates do not affect each other.
        list_inactive = list(queryset.filter(
            inactive=True).values_list('id', flat=True))
        list_active = list(queryset.filter(
            inactive=False).values_list('id', flat=True))
        n_set_active = queryset.filter(
            id__in=list_inactive).update(inactive=False)
        n_set_inactive = queryset.filter(
            id__in=list_active).update(inactive=True)
        msg_template = "{} image(s) set as inactive; {} set as active"
        self.message_user(
            request, msg_template.format(n_set_inactive, n_set_active),
            messages.SUCCESS)
    actions = [remove_file, set_inactive]
admin.site.register(DescriptionImage, DescriptionImageAdmin)
| 33.75 | 88 | 0.616016 |
7954ab8f63b72f63bee762d27c6df4f4a2f7a027 | 393 | py | Python | django/cantusdb_project/cantusdb/wsgi.py | DDMAL/CantusDB | 63c7b8df3c703008bd331097c7fb5e72c8890bb0 | [
"MIT"
] | 2 | 2020-10-16T09:50:54.000Z | 2021-07-29T08:38:34.000Z | django/cantusdb_project/cantusdb/wsgi.py | DDMAL/CantusDB | 63c7b8df3c703008bd331097c7fb5e72c8890bb0 | [
"MIT"
] | 65 | 2020-05-11T19:22:30.000Z | 2022-03-30T22:30:02.000Z | django/cantusdb_project/cantusdb/wsgi.py | DDMAL/CantusDB | 63c7b8df3c703008bd331097c7fb5e72c8890bb0 | [
"MIT"
] | null | null | null | """
WSGI config for cantusdb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cantusdb.settings')
# Module-level WSGI callable looked up by servers such as gunicorn/mod_wsgi.
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
7954abbe5cd2359f547187b4b929f3cc2788181a | 2,065 | py | Python | whatwhy/resource_manager/jamspell.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | 2 | 2020-07-29T07:26:47.000Z | 2020-07-29T07:26:55.000Z | whatwhy/resource_manager/jamspell.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | null | null | null | whatwhy/resource_manager/jamspell.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | null | null | null | import os
import shutil
import logging
import tarfile
import requests
from whatwhy import get_resources_folder
from .nltk import configure_nltk
# Module-wide logging setup; basicConfig is a no-op if the root logger was
# already configured by the embedding application.
logging.basicConfig(level="INFO")
logger = logging.getLogger(__name__)
def get_jamspell_model_file_name():
    """Return the path of the English jamspell model, fetching it on first use.

    Ensures the runtime prerequisites are in place, then downloads the model
    file if it is not already present in the resources folder.
    """
    configure_jamspell()
    model_path = os.path.join(get_jamspell_resources_folder(), "en.bin")
    if not os.path.exists(model_path):
        logger.warning("jamspell language model file was not found.")
        download_jamspell_language_model()
    return model_path
def configure_jamspell():
    """Ensure jamspell's runtime prerequisites: the swig3 binary and NLTK data."""
    assert_swig3_is_installed()
    configure_nltk()
def get_jamspell_resources_folder():
    """Return the folder holding jamspell resources, creating it if missing.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the check-then-create
    pattern (``isdir`` + ``mkdir``) to avoid a race when two processes start
    at once, and to create any missing parent directories as well.
    """
    jamspell_resources_folder = os.path.join(get_resources_folder(), "jamspell")
    os.makedirs(jamspell_resources_folder, exist_ok=True)
    return jamspell_resources_folder
def assert_swig3_is_installed():
    """Raise AssertionError when no swig3 executable can be found on PATH."""
    candidate_names = ["swig", "swig3.0", "swig.exe", "swig3.0.exe"]
    error_message = "swig3.0 library was not found. Please install using the installation instructions at https://github.com/swig/swig/wiki/Getting-Started"
    # Any one of the known executable names is sufficient.
    assert any(
        shutil.which(name) is not None for name in candidate_names
    ), error_message
def download_jamspell_language_model():
    """Download and unpack the English jamspell model into the resources folder.

    Fetches a gzipped tarball from GitHub, extracts it next to the other
    jamspell resources and deletes the archive afterwards.
    """
    logger.info("Downloading jamspell language model")
    model_url = "https://github.com/bakwc/JamSpell-models/raw/master/en.tar.gz"
    tar_file_name = os.path.join(get_jamspell_resources_folder(), "en.tar.gz")
    # NOTE(review): stream=True is requested but .content still buffers the
    # whole body in memory; acceptable for a model of this size.
    with requests.get(model_url, stream=True) as compressed_model:
        with open(tar_file_name, "wb") as tar_file:
            tar_file.write(compressed_model.content)
    # SECURITY: extractall() trusts the archive's member paths; a malicious
    # tarball could write outside the target directory (path traversal).
    # The URL is pinned upstream, but consider an extraction filter.
    with tarfile.open(tar_file_name) as tar_file:
        tar_file.extractall(path=get_jamspell_resources_folder())
    os.remove(tar_file_name)
7954abdefb3dc2ab362cf404df34131487647ecb | 787 | py | Python | db_file_storage/views.py | zvolsky/db_file_storage | 9b94cfc46241b24e694da0849aa482a9fe79bf62 | [
"MIT"
] | 2 | 2019-06-15T03:32:50.000Z | 2019-06-15T10:14:28.000Z | db_file_storage/views.py | zvolsky/db_file_storage | 9b94cfc46241b24e694da0849aa482a9fe79bf62 | [
"MIT"
] | null | null | null | db_file_storage/views.py | zvolsky/db_file_storage | 9b94cfc46241b24e694da0849aa482a9fe79bf62 | [
"MIT"
] | 1 | 2019-06-15T03:32:53.000Z | 2019-06-15T03:32:53.000Z | # third party
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from wsgiref.util import FileWrapper
# project
from db_file_storage.storage import DatabaseFileStorage
storage = DatabaseFileStorage()


def get_file(request, add_attachment_headers):
    """Stream a database-stored file back to the client.

    The file is looked up by the ``name`` query parameter; any lookup
    failure is mapped to a 400 response. When ``add_attachment_headers``
    is true, a Content-Disposition header forces a download.
    """
    name = request.GET.get('name')
    try:
        stored_file = storage.open(name)
    except Exception:
        # Missing/invalid name or missing row: respond with a generic 400.
        return HttpResponseBadRequest(_('Invalid request'))

    response = HttpResponse(
        FileWrapper(stored_file),
        content_type=stored_file.mimetype
    )
    # After storage.open(), tell() reports the full file size.
    response['Content-Length'] = stored_file.tell()
    if add_attachment_headers:
        response['Content-Disposition'] = \
            'attachment; filename=%(name)s' % {'name': stored_file.filename}
    return response
| 26.233333 | 70 | 0.711563 |
7954ac11f3945bdbe65d168c7a86097ebc504f22 | 21,483 | py | Python | awx/sso/tests/functional/test_pipeline.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | awx/sso/tests/functional/test_pipeline.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | 2 | 2022-02-10T11:57:21.000Z | 2022-02-27T22:43:44.000Z | awx/sso/tests/functional/test_pipeline.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | import pytest
import re
from unittest import mock
from django.utils.timezone import now
from awx.sso.pipeline import update_user_orgs, update_user_teams, update_user_orgs_by_saml_attr, update_user_teams_by_saml_attr, _check_flag
from awx.main.models import User, Team, Organization, Credential, CredentialType
@pytest.fixture
def galaxy_credential():
    """Create the managed 'Ansible Galaxy' credential that orgs auto-receive."""
    galaxy_type = CredentialType.objects.create(kind='galaxy')
    credential = Credential(
        created=now(),
        modified=now(),
        name='Ansible Galaxy',
        managed=True,
        credential_type=galaxy_type,
        inputs={'url': 'https://galaxy.ansible.com/'},
    )
    credential.save()
@pytest.fixture
def users():
    """Create three throwaway users and return them as a tuple."""
    def make(username):
        return User.objects.create(
            username=username, last_name='foo', first_name='bar',
            email=username)

    return (
        make('user1@foo.com'),
        make('user2@foo.com'),
        make('user3@foo.com'),
    )
@pytest.mark.django_db
class TestSAMLMap:
    """Tests for the ORGANIZATION_MAP / TEAM_MAP driven pipeline steps."""
    @pytest.fixture
    def backend(self):
        # Minimal stand-in for a python-social-auth backend: only the
        # setting() accessor used by the pipeline functions is provided,
        # and tests mutate the shared dict 's' to change behaviour.
        class Backend:
            s = {
                'ORGANIZATION_MAP': {
                    'Default': {
                        'remove': True,
                        'admins': 'foobar',
                        'remove_admins': True,
                        'users': 'foo',
                        'remove_users': True,
                        'organization_alias': '',
                    }
                },
                'TEAM_MAP': {'Blue': {'organization': 'Default', 'remove': True, 'users': ''}, 'Red': {'organization': 'Default', 'remove': True, 'users': ''}},
            }
            def setting(self, key):
                return self.s[key]
        return Backend()
    @pytest.fixture
    def org(self):
        return Organization.objects.create(name="Default")
    def test_update_user_orgs(self, org, backend, users, galaxy_credential):
        """Org membership follows the map; removal flags control pruning."""
        u1, u2, u3 = users
        # Test user membership logic with regular expressions
        backend.setting('ORGANIZATION_MAP')['Default']['admins'] = re.compile('.*')
        backend.setting('ORGANIZATION_MAP')['Default']['users'] = re.compile('.*')
        update_user_orgs(backend, None, u1)
        update_user_orgs(backend, None, u2)
        update_user_orgs(backend, None, u3)
        assert org.admin_role.members.count() == 3
        assert org.member_role.members.count() == 3
        # Test remove feature enabled
        backend.setting('ORGANIZATION_MAP')['Default']['admins'] = ''
        backend.setting('ORGANIZATION_MAP')['Default']['users'] = ''
        backend.setting('ORGANIZATION_MAP')['Default']['remove_admins'] = True
        backend.setting('ORGANIZATION_MAP')['Default']['remove_users'] = True
        update_user_orgs(backend, None, u1)
        assert org.admin_role.members.count() == 2
        assert org.member_role.members.count() == 2
        # Test remove feature disabled
        backend.setting('ORGANIZATION_MAP')['Default']['remove_admins'] = False
        backend.setting('ORGANIZATION_MAP')['Default']['remove_users'] = False
        update_user_orgs(backend, None, u2)
        assert org.admin_role.members.count() == 2
        assert org.member_role.members.count() == 2
        # Test organization alias feature
        backend.setting('ORGANIZATION_MAP')['Default']['organization_alias'] = 'Default_Alias'
        update_user_orgs(backend, None, u1)
        assert Organization.objects.get(name="Default_Alias") is not None
        # Every (auto-)created org receives the managed Galaxy credential.
        for o in Organization.objects.all():
            assert o.galaxy_credentials.count() == 1
            assert o.galaxy_credentials.first().name == 'Ansible Galaxy'
    def test_update_user_teams(self, backend, users, galaxy_credential):
        """Team membership follows TEAM_MAP; 'remove' controls pruning."""
        u1, u2, u3 = users
        # Test user membership logic with regular expressions
        backend.setting('TEAM_MAP')['Blue']['users'] = re.compile('.*')
        backend.setting('TEAM_MAP')['Red']['users'] = re.compile('.*')
        update_user_teams(backend, None, u1)
        update_user_teams(backend, None, u2)
        update_user_teams(backend, None, u3)
        assert Team.objects.get(name="Red").member_role.members.count() == 3
        assert Team.objects.get(name="Blue").member_role.members.count() == 3
        # Test remove feature enabled
        backend.setting('TEAM_MAP')['Blue']['remove'] = True
        backend.setting('TEAM_MAP')['Red']['remove'] = True
        backend.setting('TEAM_MAP')['Blue']['users'] = ''
        backend.setting('TEAM_MAP')['Red']['users'] = ''
        update_user_teams(backend, None, u1)
        assert Team.objects.get(name="Red").member_role.members.count() == 2
        assert Team.objects.get(name="Blue").member_role.members.count() == 2
        # Test remove feature disabled
        backend.setting('TEAM_MAP')['Blue']['remove'] = False
        backend.setting('TEAM_MAP')['Red']['remove'] = False
        update_user_teams(backend, None, u2)
        assert Team.objects.get(name="Red").member_role.members.count() == 2
        assert Team.objects.get(name="Blue").member_role.members.count() == 2
        for o in Organization.objects.all():
            assert o.galaxy_credentials.count() == 1
            assert o.galaxy_credentials.first().name == 'Ansible Galaxy'
@pytest.mark.django_db
class TestSAMLAttr:
    """Tests for the SAML-attribute based org/team assignment pipeline steps."""
    @pytest.fixture
    def kwargs(self):
        """A representative SAML pipeline kwargs payload (one login response)."""
        return {
            'username': u'cmeyers@redhat.com',
            'uid': 'idp:cmeyers@redhat.com',
            'request': {u'SAMLResponse': [], u'RelayState': [u'idp']},
            'is_new': False,
            'response': {
                'session_index': '_0728f0e0-b766-0135-75fa-02842b07c044',
                'idp_name': u'idp',
                'attributes': {
                    'memberOf': ['Default1', 'Default2'],
                    'admins': ['Default3'],
                    'auditors': ['Default4'],
                    'groups': ['Blue', 'Red'],
                    'User.email': ['cmeyers@redhat.com'],
                    'User.LastName': ['Meyers'],
                    'name_id': 'cmeyers@redhat.com',
                    'User.FirstName': ['Chris'],
                    'PersonImmutableID': [],
                },
            },
            #'social': <UserSocialAuth: cmeyers@redhat.com>,
            'social': None,
            #'strategy': <awx.sso.strategies.django_strategy.AWXDjangoStrategy object at 0x8523a10>,
            'strategy': None,
            'new_association': False,
        }
    @pytest.fixture
    def orgs(self):
        """Pre-create three organizations referenced by the SAML attributes."""
        o1 = Organization.objects.create(name='Default1')
        o2 = Organization.objects.create(name='Default2')
        o3 = Organization.objects.create(name='Default3')
        return (o1, o2, o3)
    @pytest.fixture
    def mock_settings(self, request):
        """Build a settings stub; @pytest.mark.fixture_args(autocreate=...) tweaks it."""
        fixture_args = request.node.get_closest_marker('fixture_args')
        if fixture_args and 'autocreate' in fixture_args.kwargs:
            autocreate = fixture_args.kwargs['autocreate']
        else:
            autocreate = True
        class MockSettings:
            SAML_AUTO_CREATE_OBJECTS = autocreate
            SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {
                'saml_attr': 'memberOf',
                'saml_admin_attr': 'admins',
                'saml_auditor_attr': 'auditors',
                'remove': True,
                'remove_admins': True,
            }
            SOCIAL_AUTH_SAML_TEAM_ATTR = {
                'saml_attr': 'groups',
                'remove': True,
                'team_org_map': [
                    {'team': 'Blue', 'organization': 'Default1'},
                    {'team': 'Blue', 'organization': 'Default2'},
                    {'team': 'Blue', 'organization': 'Default3'},
                    {'team': 'Red', 'organization': 'Default1'},
                    {'team': 'Green', 'organization': 'Default1'},
                    {'team': 'Green', 'organization': 'Default3'},
                    {'team': 'Yellow', 'team_alias': 'Yellow_Alias', 'organization': 'Default4', 'organization_alias': 'Default4_Alias'},
                ],
            }
        return MockSettings()
    @pytest.fixture
    def backend(self):
        # Minimal python-social-auth backend stub exposing only setting().
        class Backend:
            s = {
                'ORGANIZATION_MAP': {
                    'Default1': {
                        'remove': True,
                        'admins': 'foobar',
                        'remove_admins': True,
                        'users': 'foo',
                        'remove_users': True,
                        'organization_alias': 'o1_alias',
                    }
                }
            }
            def setting(self, key):
                return self.s[key]
        return Backend()
    def test_update_user_orgs_by_saml_attr(self, orgs, users, galaxy_credential, kwargs, mock_settings, backend):
        """Org membership tracks the 'memberOf' attribute, honoring 'remove'."""
        with mock.patch('django.conf.settings', mock_settings):
            o1, o2, o3 = orgs
            u1, u2, u3 = users
            # Test getting orgs from attribute
            update_user_orgs_by_saml_attr(None, None, u1, **kwargs)
            update_user_orgs_by_saml_attr(None, None, u2, **kwargs)
            update_user_orgs_by_saml_attr(None, None, u3, **kwargs)
            assert o1.member_role.members.count() == 3
            assert o2.member_role.members.count() == 3
            assert o3.member_role.members.count() == 0
            # Test remove logic enabled
            kwargs['response']['attributes']['memberOf'] = ['Default3']
            update_user_orgs_by_saml_attr(None, None, u1, **kwargs)
            assert o1.member_role.members.count() == 2
            assert o2.member_role.members.count() == 2
            assert o3.member_role.members.count() == 1
            # Test remove logic disabled
            mock_settings.SOCIAL_AUTH_SAML_ORGANIZATION_ATTR['remove'] = False
            kwargs['response']['attributes']['memberOf'] = ['Default1', 'Default2']
            update_user_orgs_by_saml_attr(None, None, u1, **kwargs)
            assert o1.member_role.members.count() == 3
            assert o2.member_role.members.count() == 3
            assert o3.member_role.members.count() == 1
            # Passing a backend applies the ORGANIZATION_MAP alias as well.
            update_user_orgs_by_saml_attr(backend, None, u1, **kwargs)
            assert Organization.objects.get(name="o1_alias").member_role.members.count() == 1
            for o in Organization.objects.all():
                assert o.galaxy_credentials.count() == 1
                assert o.galaxy_credentials.first().name == 'Ansible Galaxy'
    def test_update_user_teams_by_saml_attr(self, orgs, users, galaxy_credential, kwargs, mock_settings):
        """Team membership tracks 'groups' through team_org_map, honoring 'remove'."""
        with mock.patch('django.conf.settings', mock_settings):
            o1, o2, o3 = orgs
            u1, u2, u3 = users
            # Test getting teams from attribute with team->org mapping
            kwargs['response']['attributes']['groups'] = ['Blue', 'Red', 'Green']
            # Ensure basic functionality
            update_user_teams_by_saml_attr(None, None, u1, **kwargs)
            update_user_teams_by_saml_attr(None, None, u2, **kwargs)
            update_user_teams_by_saml_attr(None, None, u3, **kwargs)
            assert Team.objects.get(name='Blue', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default2').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default3').member_role.members.count() == 3
            assert Team.objects.get(name='Red', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3
            # Test remove logic
            kwargs['response']['attributes']['groups'] = ['Green']
            update_user_teams_by_saml_attr(None, None, u1, **kwargs)
            update_user_teams_by_saml_attr(None, None, u2, **kwargs)
            update_user_teams_by_saml_attr(None, None, u3, **kwargs)
            assert Team.objects.get(name='Blue', organization__name='Default1').member_role.members.count() == 0
            assert Team.objects.get(name='Blue', organization__name='Default2').member_role.members.count() == 0
            assert Team.objects.get(name='Blue', organization__name='Default3').member_role.members.count() == 0
            assert Team.objects.get(name='Red', organization__name='Default1').member_role.members.count() == 0
            assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3
            # Test remove logic disabled
            mock_settings.SOCIAL_AUTH_SAML_TEAM_ATTR['remove'] = False
            kwargs['response']['attributes']['groups'] = ['Blue']
            update_user_teams_by_saml_attr(None, None, u1, **kwargs)
            update_user_teams_by_saml_attr(None, None, u2, **kwargs)
            update_user_teams_by_saml_attr(None, None, u3, **kwargs)
            assert Team.objects.get(name='Blue', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default2').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default3').member_role.members.count() == 3
            assert Team.objects.get(name='Red', organization__name='Default1').member_role.members.count() == 0
            assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3
            for o in Organization.objects.all():
                assert o.galaxy_credentials.count() == 1
                assert o.galaxy_credentials.first().name == 'Ansible Galaxy'
    def test_update_user_teams_alias_by_saml_attr(self, orgs, users, galaxy_credential, kwargs, mock_settings):
        """team_alias / organization_alias create the aliased team and org."""
        with mock.patch('django.conf.settings', mock_settings):
            u1 = users[0]
            # Test getting teams from attribute with team->org mapping
            kwargs['response']['attributes']['groups'] = ['Yellow']
            # Ensure team and org will be created
            update_user_teams_by_saml_attr(None, None, u1, **kwargs)
            assert Team.objects.filter(name='Yellow', organization__name='Default4').count() == 0
            assert Team.objects.filter(name='Yellow_Alias', organization__name='Default4').count() == 1
            assert Team.objects.get(name='Yellow_Alias', organization__name='Default4').member_role.members.count() == 1
            # only Org 4 got created/updated
            org = Organization.objects.get(name='Default4')
            assert org.galaxy_credentials.count() == 1
            assert org.galaxy_credentials.first().name == 'Ansible Galaxy'
    @pytest.mark.fixture_args(autocreate=False)
    def test_autocreate_disabled(self, users, kwargs, mock_settings):
        """Without auto-create nothing is created; pre-created objects still work."""
        kwargs['response']['attributes']['memberOf'] = ['Default1', 'Default2', 'Default3']
        kwargs['response']['attributes']['groups'] = ['Blue', 'Red', 'Green']
        with mock.patch('django.conf.settings', mock_settings):
            for u in users:
                update_user_orgs_by_saml_attr(None, None, u, **kwargs)
                update_user_teams_by_saml_attr(None, None, u, **kwargs)
            assert Organization.objects.count() == 0
            assert Team.objects.count() == 0
            # precreate everything
            o1 = Organization.objects.create(name='Default1')
            o2 = Organization.objects.create(name='Default2')
            o3 = Organization.objects.create(name='Default3')
            Team.objects.create(name='Blue', organization_id=o1.id)
            Team.objects.create(name='Blue', organization_id=o2.id)
            Team.objects.create(name='Blue', organization_id=o3.id)
            Team.objects.create(name='Red', organization_id=o1.id)
            Team.objects.create(name='Green', organization_id=o1.id)
            Team.objects.create(name='Green', organization_id=o3.id)
            for u in users:
                update_user_orgs_by_saml_attr(None, None, u, **kwargs)
                update_user_teams_by_saml_attr(None, None, u, **kwargs)
            assert o1.member_role.members.count() == 3
            assert o2.member_role.members.count() == 3
            assert o3.member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default2').member_role.members.count() == 3
            assert Team.objects.get(name='Blue', organization__name='Default3').member_role.members.count() == 3
            assert Team.objects.get(name='Red', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
            assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3
    def test_galaxy_credential_auto_assign(self, users, kwargs, galaxy_credential, mock_settings):
        """All auto-created orgs receive the managed Galaxy credential."""
        kwargs['response']['attributes']['memberOf'] = ['Default1', 'Default2', 'Default3']
        kwargs['response']['attributes']['groups'] = ['Blue', 'Red', 'Green']
        with mock.patch('django.conf.settings', mock_settings):
            for u in users:
                update_user_orgs_by_saml_attr(None, None, u, **kwargs)
                update_user_teams_by_saml_attr(None, None, u, **kwargs)
        assert Organization.objects.count() == 4
        for o in Organization.objects.all():
            assert o.galaxy_credentials.count() == 1
            assert o.galaxy_credentials.first().name == 'Ansible Galaxy'
@pytest.mark.django_db
class TestSAMLUserFlags:
    """Tests for _check_flag: deriving the superuser flag from SAML data."""
    @pytest.mark.parametrize(
        "user_flags_settings, expected",
        [
            # In this case we will pass no user flags so new_flag should be false and changed will def be false
            (
                {},
                (False, False),
            ),
            # In this case we will give the user a group to make them an admin
            (
                {'is_superuser_role': 'test-role-1'},
                (True, True),
            ),
            # In this case we will give the user a flag that will make then an admin
            (
                {'is_superuser_attr': 'is_superuser'},
                (True, True),
            ),
            # In this case we will give the user a flag but the wrong value
            (
                {'is_superuser_attr': 'is_superuser', 'is_superuser_value': 'junk'},
                (False, False),
            ),
            # In this case we will give the user a flag and the right value
            (
                {'is_superuser_attr': 'is_superuser', 'is_superuser_value': 'true'},
                (True, True),
            ),
            # In this case we will give the user a proper role and an is_superuser_attr role that they dont have, this should make them an admin
            (
                {'is_superuser_role': 'test-role-1', 'is_superuser_attr': 'gibberish', 'is_superuser_value': 'true'},
                (True, True),
            ),
            # In this case we will give the user a proper role and an is_superuser_attr role that they have, this should make them an admin
            (
                {'is_superuser_role': 'test-role-1', 'is_superuser_attr': 'test-role-1'},
                (True, True),
            ),
            # In this case we will give the user a proper role and an is_superuser_attr role that they have but a bad value, this should make them an admin
            (
                {'is_superuser_role': 'test-role-1', 'is_superuser_attr': 'is_superuser', 'is_superuser_value': 'junk'},
                (False, False),
            ),
            # In this case we will give the user everything
            (
                {'is_superuser_role': 'test-role-1', 'is_superuser_attr': 'is_superuser', 'is_superuser_value': 'true'},
                (True, True),
            ),
            # In this test case we will validate that a single attribute (instead of a list) still works
            (
                {'is_superuser_attr': 'name_id', 'is_superuser_value': 'test_id'},
                (True, True),
            ),
            # This will be a negative test for a single atrribute
            (
                {'is_superuser_attr': 'name_id', 'is_superuser_value': 'junk'},
                (False, False),
            ),
        ],
    )
    def test__check_flag(self, user_flags_settings, expected):
        """expected is the (new_flag_value, value_changed) pair from _check_flag."""
        user = User()
        user.username = 'John'
        user.is_superuser = False
        # Fixed SAML attribute payload shared by all parametrized cases.
        attributes = {
            'email': ['noone@nowhere.com'],
            'last_name': ['Westcott'],
            'is_superuser': ['something', 'else', 'true'],
            'username': ['test_id'],
            'first_name': ['John'],
            'Role': ['test-role-1', 'something', 'different'],
            'name_id': 'test_id',
        }
        assert expected == _check_flag(user, 'superuser', attributes, user_flags_settings)
| 44.943515 | 160 | 0.593632 |
7954ac18407bfc30e76c9f88ce96b6a419650c2d | 35 | py | Python | sciml/__init__.py | soonyenju/sciml | 9ddabd031d5ad6e168442b88ef3a1272482307f8 | [
"MIT"
] | null | null | null | sciml/__init__.py | soonyenju/sciml | 9ddabd031d5ad6e168442b88ef3a1272482307f8 | [
"MIT"
] | null | null | null | sciml/__init__.py | soonyenju/sciml | 9ddabd031d5ad6e168442b88ef3a1272482307f8 | [
"MIT"
] | null | null | null | # coding: utf-8
__all__ = ["utils"] | 17.5 | 19 | 0.628571 |
7954ad430e571134f296812972959600a28d0607 | 1,976 | py | Python | view/error.py | zitelog/fit | 1b0b65b00cd3cbbdda5b1a595a107f44af2fddab | [
"MIT"
] | 1 | 2022-02-17T23:28:14.000Z | 2022-02-17T23:28:14.000Z | view/error.py | zitelog/fit | 1b0b65b00cd3cbbdda5b1a595a107f44af2fddab | [
"MIT"
] | null | null | null | view/error.py | zitelog/fit | 1b0b65b00cd3cbbdda5b1a595a107f44af2fddab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
######
# File: error.py
# Project: FIT
# Created Date: Wednesday, August 18th 2021, 2:27:31 pm
# Author: Fabio Zito
# -----
# Last Modified: Wed Aug 18 2021
# Modified By: Fabio Zito
# -----
# MIT License
#
# Copyright (c) 2021 ZF zitelog@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----
# HISTORY:
# Date By Comments
# ---------- --- ----------------------------------------------------------
######
from PyQt5.QtWidgets import QMessageBox
from PyQt5 import QtCore, QtGui
class ErrorView(QMessageBox):
    """Modal message box used to report errors to the user.

    Thin wrapper over QMessageBox that pre-fills icon, title, main text and
    informative detail text from the constructor arguments.
    """
    def __init__(self, severity, title, message, details, parent=None):
        """Build the dialog.

        severity -- a QMessageBox icon constant (e.g. QMessageBox.Critical)
        title    -- window title text
        message  -- short main message
        details  -- longer informative text shown under the message
        parent   -- optional parent widget
        """
        super(ErrorView, self).__init__(parent)
        # enable custom window hint; with only CustomizeWindowHint and
        # WindowTitleHint set, presumably no close/min/max buttons are shown
        # (Qt window-hint semantics — confirm on target platform).
        self.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
        self.setIcon(severity)
        self.setWindowTitle(title)
        self.setText(message)
        self.setInformativeText(details)
| 38.745098 | 86 | 0.699899 |
7954ada1d5be94eba038385f23633c33864a7a8b | 1,892 | py | Python | nc/migrations/0038_auto_20180805_1846.py | kfarrelly/nucleo | 096fa9fdfac39383269904f5d58b3714ce2d2f46 | [
"Apache-2.0"
] | 1 | 2022-01-10T23:35:53.000Z | 2022-01-10T23:35:53.000Z | nc/migrations/0038_auto_20180805_1846.py | kfarrelly/nucleo | 096fa9fdfac39383269904f5d58b3714ce2d2f46 | [
"Apache-2.0"
] | null | null | null | nc/migrations/0038_auto_20180805_1846.py | kfarrelly/nucleo | 096fa9fdfac39383269904f5d58b3714ce2d2f46 | [
"Apache-2.0"
] | 1 | 2022-01-10T23:36:32.000Z | 2022-01-10T23:36:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-05 18:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (see header): creates the EmailSettings and
    FollowRequest models and adds the Profile.is_private flag.

    Do not hand-edit the operations; generate a follow-up migration instead.
    """
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('nc', '0037_auto_20180716_2052'),
    ]
    operations = [
        # Per-user email notification preferences; the user FK doubles as the PK.
        migrations.CreateModel(
            name='EmailSettings',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='email_settings', serialize=False, to=settings.AUTH_USER_MODEL)),
                ('allow_payment_notification', models.BooleanField(default=True)),
                ('allow_token_issuance_notification', models.BooleanField(default=True)),
                ('allow_trade_notification', models.BooleanField(default=True)),
                ('allow_follower_notification', models.BooleanField(default=True)),
            ],
        ),
        # Pending follow request: `requester` asks to follow `user`.
        migrations.CreateModel(
            name='FollowRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requests_to_follow', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower_requests', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='profile',
            name='is_private',
            field=models.BooleanField(default=False),
        ),
    ]
| 43 | 187 | 0.650634 |
7954adf727139e48db300db600aa22adbf0732fa | 2,080 | py | Python | cl_app/models.py | abbey-titcomb/the-store-to-be-named | edfcfca2af89df82efa8bd0c85bc63b32197f10b | [
"MIT"
] | null | null | null | cl_app/models.py | abbey-titcomb/the-store-to-be-named | edfcfca2af89df82efa8bd0c85bc63b32197f10b | [
"MIT"
] | 5 | 2020-06-06T01:14:32.000Z | 2022-01-13T02:46:40.000Z | cl_app/models.py | abbey-titcomb/the-store-to-be-named | edfcfca2af89df82efa8bd0c85bc63b32197f10b | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
# Create your models here.
class City(models.Model):
    """A city in which listings can be posted."""
    city = models.CharField(max_length=20)
    def __str__(self):
        """Display the city name."""
        return self.city
class Profile(models.Model):
    """Per-user profile (auto-created by the post_save receiver below)."""
    user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
    # Nullable: a user may not have picked a preferred city yet.
    profile_city = models.ForeignKey(City, verbose_name='Preferred City', null=True, on_delete=models.CASCADE)
    preferred_contact = models.CharField(max_length=30, null=True)
    def __str__(self):
        """Display the owning user's name."""
        return str(self.user)
class ListingType(models.Model):
    """Listing category; self-referencing `parent` allows nested subcategories."""
    name = models.CharField(max_length=20)
    parent = models.ForeignKey("self", null=True, blank=True, related_name='subcat', on_delete=models.CASCADE)
    def __str__(self):
        """Display the category name."""
        return self.name
class Listing(models.Model):
    """A classified-ad listing posted by a user in a given city."""
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    listing_city = models.ForeignKey(City, on_delete=models.CASCADE)
    category = models.ForeignKey(ListingType, on_delete=models.CASCADE)
    title = models.CharField(max_length=40)
    price = models.IntegerField()
    description = models.TextField()
    # Optional photo; photo_url below falls back to a placeholder when absent.
    photo = models.ImageField(upload_to="listing_photos", null=True, blank=True, verbose_name="Listing Photo")
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Display the listing title."""
        return self.title
    class Meta:
        # Newest listings first.
        ordering = ['-created']
    @property
    def photo_url(self):
        """URL of the uploaded photo, or the default placeholder image."""
        if self.photo:
            return self.photo.url
        return "/media/listing_photos/classifieds-default.jpg"
@receiver(post_save, sender='auth.User')
def create_user_profile(**kwargs):
    """Create a Profile for every newly created auth.User."""
    if kwargs.get("created"):
        Profile.objects.create(user=kwargs.get("instance"))
@receiver(post_save, sender='auth.User')
def create_token(**kwargs):
    """Issue a REST-framework auth Token for every newly created auth.User."""
    if kwargs.get('created'):
        Token.objects.create(user=kwargs.get('instance'))
| 30.144928 | 110 | 0.713942 |
7954af1664e98eaa9a5c1fe9a18a5ab24ab6272d | 3,512 | py | Python | tests/unit/test_external_dns.py | awsbot-labs/python-kubernetes-lambda | 0f7cf746ef365391a83b577154caa8dc2a0254af | [
"MIT"
] | null | null | null | tests/unit/test_external_dns.py | awsbot-labs/python-kubernetes-lambda | 0f7cf746ef365391a83b577154caa8dc2a0254af | [
"MIT"
] | null | null | null | tests/unit/test_external_dns.py | awsbot-labs/python-kubernetes-lambda | 0f7cf746ef365391a83b577154caa8dc2a0254af | [
"MIT"
] | null | null | null | from lambdakube.external_dns import ExternalDNS
from tests.unit.test_utils import BaseLambdaKubeTest
from mock import Mock
class ExternalDNSTest(BaseLambdaKubeTest):
    """Unit tests for the ExternalDNS Kubernetes manifest builder.

    All three Kubernetes API groups are patched with one shared mock, so the
    tests exercise only object construction, never a live cluster.
    """

    def setUp(self):
        super(ExternalDNSTest, self).setUp()
        self.name = 'external-dns'
        self.namespace = 'kube-system'
        self.external_dns = ExternalDNS(
            configuration=self.config,
            role_arn=self.role_arn,
            dns_domain=self.dns_domain
        )
        # One mock client stands in for every API group used by ExternalDNS.
        self.kube_client = Mock()
        type(self.external_dns).apps_v1_api = self.kube_client
        type(self.external_dns).core_v1_api = self.kube_client
        type(self.external_dns).rbac_v1_api = self.kube_client

    def test_create(self):
        """create() returns one object per generated manifest (4 total)."""
        response = self.external_dns.create()
        self.assertEqual(len(response), 4)

    def test_patch(self):
        # NOTE(review): this exercises create(), not patch() — looks like a
        # copy/paste slip; confirm whether ExternalDNS.patch() should be
        # called here instead.
        response = self.external_dns.create()
        self.assertEqual(len(response), 4)

    def test_delete(self):
        # NOTE(review): this exercises create(), not delete() — same concern
        # as test_patch above.
        response = self.external_dns.create()
        self.assertEqual(len(response), 4)

    def test_cluster_role(self):
        """The generated ClusterRole has the expected kind, version and rules."""
        response = self.external_dns._cluster_role()
        self.assertEqual(response.api_version, 'rbac.authorization.k8s.io/v1beta1')
        self.assertEqual(response.kind, 'ClusterRole')
        self.assertTrue(response.metadata)
        self.assertTrue(response.rules)

    def test_service_account(self):
        """The ServiceAccount carries the eks.amazonaws.com/role-arn annotation."""
        annotations = dict({'eks.amazonaws.com/role-arn': self.role_arn})
        response = self.external_dns._service_account()
        self.assertEqual(response.api_version, 'v1')
        self.assertEqual(response.kind, 'ServiceAccount')
        self.assertEqual(response.metadata.annotations, annotations)

    def test_deployment(self):
        """The Deployment has the IAM role annotation and the expected args."""
        annotations = dict({'iam.amazonaws.com/role': self.role_arn})
        args = [
            '--source=service',
            '--source=ingress',
            f'--domain-filter={self.dns_domain}',
            '--provider=aws',
            '--registry=txt',
            '--txt-owner-id=hostedzone-identifier',
        ]
        response = self.external_dns._deployment()
        self.assertEqual(response.api_version, 'apps/v1')
        self.assertEqual(response.kind, 'Deployment')
        self.assertTrue(response.metadata)
        self.assertEqual(response.spec.template.metadata.annotations,
                         annotations)
        self.assertEqual(response.spec.strategy.type, 'Recreate')
        self.assertEqual(response.spec.template.spec.security_context.fs_group,
                         65534)
        self.assertEqual(response.spec.template.spec.containers.image,
                         'registry.opensource.zalan.do/teapot/external-dns:latest')
        self.assertEqual(response.spec.template.spec.containers.args, args)

    def test_cluster_role_binding(self):
        """The ClusterRoleBinding links the ClusterRole to the ServiceAccount."""
        response = self.external_dns._cluster_role_binding()
        self.assertEqual(response.kind, 'ClusterRoleBinding')
        self.assertEqual(response.metadata.name, f'{self.name}-viewer')
        self.assertEqual(response.role_ref.api_group,
                         'rbac.authorization.k8s.io')
        self.assertEqual(response.role_ref.kind, 'ClusterRole')
        self.assertEqual(response.role_ref.name, self.name)
        self.assertEqual(response.subjects[0].api_group, None)
        self.assertEqual(response.subjects[0].kind, 'ServiceAccount')
        self.assertEqual(response.subjects[0].name, self.name)
        self.assertEqual(response.subjects[0].namespace, self.namespace)
7954af782ff489bd5132456890c68f76c284be3c | 4,421 | py | Python | test/sox_io_backend/test_info.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | null | null | null | test/sox_io_backend/test_info.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | null | null | null | test/sox_io_backend/test_info.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | null | null | null | import itertools
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from ..common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoExec,
skipIfNoExtension,
)
from .common import (
get_test_name
)
from . import sox_utils
@skipIfNoExec('sox')
@skipIfNoExtension
class TestInfo(TempDirMixin, PytorchTestCase):
    """Validate `sox_io_backend.info` on audio files generated with the sox CLI.

    Every test generates a one-second file with known parameters, queries it
    through `sox_io_backend.info`, and checks the reported metadata. The
    repeated generate/query/assert pattern is factored into `_assert_info`.
    """

    def _assert_info(self, filename, sample_rate, num_channels, duration,
                     check_num_frames=True, **gen_kwargs):
        """Generate `filename` via sox and assert `info` reports the expected metadata.

        `check_num_frames` is False for formats (e.g. mp3) that do not
        preserve the exact number of samples. Extra keyword args are passed
        through to `sox_utils.gen_audio_file` (bit_depth, encoding,
        compression, ...).
        """
        path = self.get_temp_path(filename)
        sox_utils.gen_audio_file(
            path, sample_rate, num_channels, duration=duration, **gen_kwargs)
        info = sox_io_backend.info(path)
        assert info.get_sample_rate() == sample_rate
        if check_num_frames:
            assert info.get_num_frames() == sample_rate * duration
        assert info.get_num_channels() == num_channels

    @parameterized.expand(list(itertools.product(
        ['float32', 'int32', 'int16', 'uint8'],
        [8000, 16000],
        [1, 2],
    )), name_func=get_test_name)
    def test_wav(self, dtype, sample_rate, num_channels):
        """`sox_io_backend.info` can check wav file correctly"""
        self._assert_info(
            f'{dtype}_{sample_rate}_{num_channels}.wav',
            sample_rate, num_channels, duration=1,
            bit_depth=sox_utils.get_bit_depth(dtype),
            encoding=sox_utils.get_encoding(dtype),
        )

    @parameterized.expand(list(itertools.product(
        ['float32', 'int32', 'int16', 'uint8'],
        [8000, 16000],
        [4, 8, 16, 32],
    )), name_func=get_test_name)
    def test_wav_multiple_channels(self, dtype, sample_rate, num_channels):
        """`sox_io_backend.info` can check wav file with channels more than 2 correctly"""
        self._assert_info(
            f'{dtype}_{sample_rate}_{num_channels}.wav',
            sample_rate, num_channels, duration=1,
            bit_depth=sox_utils.get_bit_depth(dtype),
            encoding=sox_utils.get_encoding(dtype),
        )

    @parameterized.expand(list(itertools.product(
        [8000, 16000],
        [1, 2],
        [96, 128, 160, 192, 224, 256, 320],
    )), name_func=get_test_name)
    def test_mp3(self, sample_rate, num_channels, bit_rate):
        """`sox_io_backend.info` can check mp3 file correctly"""
        # mp3 does not preserve the exact number of samples, so that check
        # is skipped.
        self._assert_info(
            f'{sample_rate}_{num_channels}_{bit_rate}k.mp3',
            sample_rate, num_channels, duration=1,
            check_num_frames=False,
            compression=bit_rate,
        )

    @parameterized.expand(list(itertools.product(
        [8000, 16000],
        [1, 2],
        list(range(9)),
    )), name_func=get_test_name)
    def test_flac(self, sample_rate, num_channels, compression_level):
        """`sox_io_backend.info` can check flac file correctly"""
        self._assert_info(
            f'{sample_rate}_{num_channels}_{compression_level}.flac',
            sample_rate, num_channels, duration=1,
            compression=compression_level,
        )

    @parameterized.expand(list(itertools.product(
        [8000, 16000],
        [1, 2],
        [-1, 0, 1, 2, 3, 3.6, 5, 10],
    )), name_func=get_test_name)
    def test_vorbis(self, sample_rate, num_channels, quality_level):
        """`sox_io_backend.info` can check vorbis file correctly"""
        self._assert_info(
            f'{sample_rate}_{num_channels}_{quality_level}.vorbis',
            sample_rate, num_channels, duration=1,
            compression=quality_level,
        )
| 38.443478 | 91 | 0.649853 |
7954b2836afcd71be317b530ea8cf144d8bc05fd | 486 | py | Python | rammon/signals.py | jackadamson/rammon | 25bc059fda066d8b24a239938a8c2cb90d18ba02 | [
"MIT"
] | null | null | null | rammon/signals.py | jackadamson/rammon | 25bc059fda066d8b24a239938a8c2cb90d18ba02 | [
"MIT"
] | null | null | null | rammon/signals.py | jackadamson/rammon | 25bc059fda066d8b24a239938a8c2cb90d18ba02 | [
"MIT"
] | null | null | null | import signal
from gi.repository import GLib
def set_handlers(mon):
GLib.unix_signal_add(
GLib.PRIORITY_HIGH,
signal.SIGTERM, # for the given signal
mon.stop, # on this signal, run this function
signal.SIGTERM, # with this argument
)
GLib.unix_signal_add(
GLib.PRIORITY_HIGH,
signal.SIGINT, # for the given signal
mon.stop, # on this signal, run this function
signal.SIGINT, # with this argument
)
| 27 | 54 | 0.641975 |
7954b2d53e005cb8d21e125fb3499a20e6977ca7 | 1,556 | py | Python | insights/parsers/containers_policy.py | TZ3070/insights-core | 13f4fc6bfcb89d76f0255c6259902360a298d619 | [
"Apache-2.0"
] | null | null | null | insights/parsers/containers_policy.py | TZ3070/insights-core | 13f4fc6bfcb89d76f0255c6259902360a298d619 | [
"Apache-2.0"
] | null | null | null | insights/parsers/containers_policy.py | TZ3070/insights-core | 13f4fc6bfcb89d76f0255c6259902360a298d619 | [
"Apache-2.0"
] | null | null | null | """
ContainersPolicy - file ``/etc/containers/policy.json``
=======================================================
"""
from insights import JSONParser, parser
from insights.specs import Specs
@parser(Specs.containers_policy)
class ContainersPolicy(JSONParser):
    """
    Class for converting file ``/etc/containers/policy.json``
    into a dictionary that matches the JSON string in the file.
    Sample file content::
        {
          "default": [
              {
                  "type": "insecureAcceptAnything"
              }
          ],
          "transports": {
            "docker": {
              "registry.access.redhat.com": [
                {
                  "type": "signedBy",
                  "keyType": "GPGKeys",
                  "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"
                }
              ],
              "registry.redhat.io/redhat/redhat-operator-index": [
                {
                  "type": "insecureAcceptAnything"
                }
              ],
              "registry.redhat.io": [
                {
                  "type": "signedBy",
                  "keyType": "GPGKeys",
                  "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"
                }
              ]
            },
            "docker-daemon": {
              "": [
                {
                  "type": "insecureAcceptAnything"
                }
              ]
            }
          }
        }
    Examples:
        >>> len(containers_policy["default"])
        1
    """
    # No parsing logic is needed here: JSONParser does all the work; this
    # subclass exists only to bind the parser to the containers_policy spec.
    pass
| 25.933333 | 74 | 0.411954 |
7954b3389e16ef9f64068856f5a1a4585b534ffb | 14,582 | py | Python | tests/components/cover/test_device_action.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | null | null | null | tests/components/cover/test_device_action.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | null | null | null | tests/components/cover/test_device_action.py | PandaBaer92/core | 4b30c9631f0f0a1ad59005f316b3f03975d2accd | [
"Apache-2.0"
] | 1 | 2022-02-06T20:41:11.000Z | 2022-02-06T20:41:11.000Z | """The tests for Cover device actions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.cover import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
)
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, mock device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, mock entity registry."""
    return mock_registry(hass)
@pytest.mark.parametrize(
    "set_state,features_reg,features_state,expected_action_types",
    [
        # features_reg cases: supported features come from the registry entry.
        (False, 0, 0, []),
        (False, SUPPORT_CLOSE_TILT, 0, ["close_tilt"]),
        (False, SUPPORT_CLOSE, 0, ["close"]),
        (False, SUPPORT_OPEN_TILT, 0, ["open_tilt"]),
        (False, SUPPORT_OPEN, 0, ["open"]),
        (False, SUPPORT_SET_POSITION, 0, ["set_position"]),
        (False, SUPPORT_SET_TILT_POSITION, 0, ["set_tilt_position"]),
        (False, SUPPORT_STOP, 0, ["stop"]),
        # features_state cases: supported features come from the live state.
        (True, 0, 0, []),
        (True, 0, SUPPORT_CLOSE_TILT, ["close_tilt"]),
        (True, 0, SUPPORT_CLOSE, ["close"]),
        (True, 0, SUPPORT_OPEN_TILT, ["open_tilt"]),
        (True, 0, SUPPORT_OPEN, ["open"]),
        (True, 0, SUPPORT_SET_POSITION, ["set_position"]),
        (True, 0, SUPPORT_SET_TILT_POSITION, ["set_tilt_position"]),
        (True, 0, SUPPORT_STOP, ["stop"]),
    ],
)
async def test_get_actions(
    hass,
    device_reg,
    entity_reg,
    set_state,
    features_reg,
    features_state,
    expected_action_types,
):
    """Test we get the expected actions from a cover.

    ``features_reg`` sets supported features on the entity registry entry;
    ``features_state`` sets them in the state attributes (only used when
    ``set_state`` is True).
    """
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN,
        "test",
        "5678",
        device_id=device_entry.id,
        supported_features=features_reg,
    )
    if set_state:
        hass.states.async_set(
            f"{DOMAIN}.test_5678", "attributes", {"supported_features": features_state}
        )
    await hass.async_block_till_done()
    # One expected action dict per action type the feature flags enable.
    expected_actions = []
    expected_actions += [
        {
            "domain": DOMAIN,
            "type": action,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        }
        for action in expected_action_types
    ]
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    assert_lists_same(actions, expected_actions)
async def test_get_action_capabilities(
    hass, device_reg, entity_reg, enable_custom_integrations
):
    """Test we get the expected capabilities from a cover action.

    The mock cover supports open/close/stop plus the tilt variants; none of
    the resulting actions take extra fields.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init(empty=True)
    platform.ENTITIES.append(
        platform.MockCover(
            name="Set position cover",
            is_on=True,
            unique_id="unique_set_pos_cover",
            current_cover_position=50,
            supported_features=SUPPORT_OPEN
            | SUPPORT_CLOSE
            | SUPPORT_STOP
            | SUPPORT_OPEN_TILT
            | SUPPORT_CLOSE_TILT
            | SUPPORT_STOP_TILT,
        ),
    )
    ent = platform.ENTITIES[0]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    # Note: SUPPORT_STOP_TILT does not add a "stop_tilt" device action.
    assert len(actions) == 5  # open, close, stop, open_tilt, close_tilt
    action_types = {action["type"] for action in actions}
    assert action_types == {"open", "close", "stop", "open_tilt", "close_tilt"}
    for action in actions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "action", action
        )
        assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_pos(
    hass, device_reg, entity_reg, enable_custom_integrations
):
    """Test we get the expected capabilities from a cover action.

    Uses the second default mock cover (ENTITIES[1]); its only exposed
    action is set_position, which takes an optional 0-100 position field.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[1]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    expected_capabilities = {
        "extra_fields": [
            {
                "name": "position",
                "optional": True,
                "type": "integer",
                "default": 0,
                "valueMax": 100,
                "valueMin": 0,
            }
        ]
    }
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    assert len(actions) == 1  # set_position
    action_types = {action["type"] for action in actions}
    assert action_types == {"set_position"}
    for action in actions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "action", action
        )
        if action["type"] == "set_position":
            assert capabilities == expected_capabilities
        else:
            assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_tilt_pos(
    hass, device_reg, entity_reg, enable_custom_integrations
):
    """Test we get the expected capabilities from a cover action.

    Uses the fourth default mock cover (ENTITIES[3]); only the
    set_tilt_position action carries the optional 0-100 position field.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[3]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    expected_capabilities = {
        "extra_fields": [
            {
                "name": "position",
                "optional": True,
                "type": "integer",
                "default": 0,
                "valueMax": 100,
                "valueMin": 0,
            }
        ]
    }
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    assert len(actions) == 3
    action_types = {action["type"] for action in actions}
    assert action_types == {"open", "close", "set_tilt_position"}
    for action in actions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "action", action
        )
        if action["type"] == "set_tilt_position":
            assert capabilities == expected_capabilities
        else:
            assert capabilities == {"extra_fields": []}
async def test_action(hass, enable_custom_integrations):
    """Test for cover actions.

    Wires three event-triggered automations (open/close/stop) and verifies
    each event fires exactly the matching cover service.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event_open"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "open",
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event_close"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "close",
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event_stop"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "stop",
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    # Mock the cover services so the calls can be counted.
    open_calls = async_mock_service(hass, "cover", "open_cover")
    close_calls = async_mock_service(hass, "cover", "close_cover")
    stop_calls = async_mock_service(hass, "cover", "stop_cover")
    hass.bus.async_fire("test_event_open")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 0
    assert len(stop_calls) == 0
    hass.bus.async_fire("test_event_close")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 1
    assert len(stop_calls) == 0
    hass.bus.async_fire("test_event_stop")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 1
    assert len(stop_calls) == 1
async def test_action_tilt(hass, enable_custom_integrations):
    """Test for cover tilt actions.

    Wires open_tilt/close_tilt automations and verifies each event fires the
    matching tilt service; an unrelated event must fire nothing.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event_open"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "open_tilt",
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event_close"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "close_tilt",
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    open_calls = async_mock_service(hass, "cover", "open_cover_tilt")
    close_calls = async_mock_service(hass, "cover", "close_cover_tilt")
    hass.bus.async_fire("test_event_open")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 0
    hass.bus.async_fire("test_event_close")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 1
    # No automation listens for this event; the counts must not change.
    hass.bus.async_fire("test_event_stop")
    await hass.async_block_till_done()
    assert len(open_calls) == 1
    assert len(close_calls) == 1
async def test_action_set_position(hass, enable_custom_integrations):
    """Test for cover set position actions.

    Verifies the position value configured on the automation action is
    forwarded to the set_cover_position / set_cover_tilt_position services.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "event",
                        "event_type": "test_event_set_pos",
                    },
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "set_position",
                        "position": 25,
                    },
                },
                {
                    "trigger": {
                        "platform": "event",
                        "event_type": "test_event_set_tilt_pos",
                    },
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "abcdefgh",
                        "entity_id": "cover.entity",
                        "type": "set_tilt_position",
                        "position": 75,
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    cover_pos_calls = async_mock_service(hass, "cover", "set_cover_position")
    tilt_pos_calls = async_mock_service(hass, "cover", "set_cover_tilt_position")
    hass.bus.async_fire("test_event_set_pos")
    await hass.async_block_till_done()
    assert len(cover_pos_calls) == 1
    assert cover_pos_calls[0].data["position"] == 25
    assert len(tilt_pos_calls) == 0
    hass.bus.async_fire("test_event_set_tilt_pos")
    await hass.async_block_till_done()
    assert len(cover_pos_calls) == 1
    assert len(tilt_pos_calls) == 1
    assert tilt_pos_calls[0].data["tilt_position"] == 75
| 33.75463 | 87 | 0.587642 |
7954b45a0d402242b786e3583a748bfa85166289 | 3,107 | py | Python | ucsmsdk/mometa/extvmm/ExtvmmEpFsmTask.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/extvmm/ExtvmmEpFsmTask.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/extvmm/ExtvmmEpFsmTask.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for ExtvmmEpFsmTask ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ExtvmmEpFsmTaskConsts():
    """Constant property values for the ExtvmmEpFsmTask managed object."""
    # Allowed values of the `completion` property.
    COMPLETION_CANCELLED = "cancelled"
    COMPLETION_COMPLETED = "completed"
    COMPLETION_PROCESSING = "processing"
    COMPLETION_SCHEDULED = "scheduled"
    # Allowed values of the naming `item` property.
    ITEM_CLUSTER_ROLE = "clusterRole"
    ITEM_NOP = "nop"
class ExtvmmEpFsmTask(ManagedObject):
    """Managed object for an external-VMM endpoint FSM task.

    NOTE(review): this module looks machine-generated from the UCS object
    model (metadata tables below) — confirm before hand-editing, as a
    regeneration would overwrite changes.
    """
    consts = ExtvmmEpFsmTaskConsts()
    # 'item' is the naming property: it appears in the RN pattern "task-[item]".
    naming_props = set([u'item'])

    mo_meta = MoMeta("ExtvmmEpFsmTask", "extvmmEpFsmTask", "task-[item]", VersionMeta.Version201m, "OutputOnly", 0xf, [], [""], [u'extvmmEp'], [], [None])

    # Per-property metadata (positional MoPropertyMeta arguments follow the
    # ucsmsdk convention — presumably name, XML attribute, type, version,
    # access, mask, min/max length, restriction pattern, value set, range set;
    # TODO confirm against ucscoremeta).
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "completion": MoPropertyMeta("completion", "completion", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cancelled", "completed", "processing", "scheduled"], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "flags": MoPropertyMeta("flags", "flags", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, r"""(defaultValue){0,1}""", [], []),
        "item": MoPropertyMeta("item", "item", "string", VersionMeta.Version201m, MoPropertyMeta.NAMING, None, None, None, None, ["clusterRole", "nop"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "seq_id": MoPropertyMeta("seq_id", "seqId", "uint", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # Maps XML attribute names to Python attribute names.
    prop_map = {
        "childAction": "child_action",
        "completion": "completion",
        "dn": "dn",
        "flags": "flags",
        "item": "item",
        "rn": "rn",
        "sacl": "sacl",
        "seqId": "seq_id",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, item, **kwargs):
        """Create the task object under *parent_mo_or_dn*, named by *item*."""
        self._dirty_mask = 0
        self.item = item
        # Remaining properties are server-populated; initialized empty here.
        self.child_action = None
        self.completion = None
        self.flags = None
        self.sacl = None
        self.seq_id = None
        self.status = None

        ManagedObject.__init__(self, "ExtvmmEpFsmTask", parent_mo_or_dn, **kwargs)
| 50.112903 | 249 | 0.64757 |
7954b54db2c57f852a99e223e3273414f033919c | 5,061 | py | Python | tests/validators/test_format_validators.py | kodemore/opyapi | 85e61274f81258d767722bc341427b88dddbcf00 | [
"MIT"
] | 1 | 2021-08-14T15:40:34.000Z | 2021-08-14T15:40:34.000Z | tests/validators/test_format_validators.py | kodemore/opyapi | 85e61274f81258d767722bc341427b88dddbcf00 | [
"MIT"
] | 2 | 2021-11-10T22:26:00.000Z | 2021-11-11T06:28:46.000Z | tests/validators/test_format_validators.py | kodemore/opyapi | 85e61274f81258d767722bc341427b88dddbcf00 | [
"MIT"
] | null | null | null | from base64 import b64encode
import pytest
from opyapi import StringFormat
from opyapi.errors import FormatValidationError
from opyapi.validators import validate_string_format
# Each row pairs a sample string with the StringFormat it must satisfy;
# pytest expands every row into an independent test case.
@pytest.mark.parametrize(
    "given_string, given_format",
    [
        # Boolean: common truthy spellings...
        ["yes", StringFormat.BOOLEAN],
        ["ok", StringFormat.BOOLEAN],
        ["1", StringFormat.BOOLEAN],
        ["true", StringFormat.BOOLEAN],
        ["y", StringFormat.BOOLEAN],
        ["on", StringFormat.BOOLEAN],
        # ...and falsy spellings (still *valid* booleans).
        ["no", StringFormat.BOOLEAN],
        ["nope", StringFormat.BOOLEAN],
        ["off", StringFormat.BOOLEAN],
        ["false", StringFormat.BOOLEAN],
        ["0", StringFormat.BOOLEAN],
        # Base64 bytes.
        [b64encode(b"format").decode("utf8"), StringFormat.BYTE],
        # Dates and times (compact ISO-like forms).
        ["20201220", StringFormat.DATE],
        ["20201220T121314", StringFormat.DATE_TIME],
        ["12.1234", StringFormat.DECIMAL],
        # Email address variants.
        ["email@example.com", StringFormat.EMAIL],
        ["email@subdomain.example.com", StringFormat.EMAIL],
        ["firstname.lastname@example.com", StringFormat.EMAIL],
        ["firstname+lastname@example.com", StringFormat.EMAIL],
        ["email@123.123.123.123", StringFormat.EMAIL],
        ["1234567890@example.com", StringFormat.EMAIL],
        ["email@example-one.com", StringFormat.EMAIL],
        ["_______@example.com", StringFormat.EMAIL],
        ["email@example.name", StringFormat.EMAIL],
        ["email@example.museum", StringFormat.EMAIL],
        ["email@example.co.jp", StringFormat.EMAIL],
        ["firstname-lastname@example.com", StringFormat.EMAIL],
        # Hostnames.
        ["google.com", StringFormat.HOSTNAME],
        ["test.foo.bar", StringFormat.HOSTNAME],
        ["localhost", StringFormat.HOSTNAME],
        # Generic IP accepts both v4 and v6.
        ["0.0.0.0", StringFormat.IP_ADDRESS],
        ["127.0.0.1", StringFormat.IP_ADDRESS],
        ["1200:0000:AB00:1234:0000:2552:7777:1313", StringFormat.IP_ADDRESS],
        ["21DA:D3:0:2F3B:2AA:FF:FE28:9C5A", StringFormat.IP_ADDRESS],
        ["0.0.0.0", StringFormat.IP_ADDRESS_V4],
        ["127.0.0.1", StringFormat.IP_ADDRESS_V4],
        ["1200:0000:AB00:1234:0000:2552:7777:1313", StringFormat.IP_ADDRESS_V6],
        ["21DA:D3:0:2F3B:2AA:FF:FE28:9C5A", StringFormat.IP_ADDRESS_V6],
        # Regex patterns.
        ["0.", StringFormat.PATTERN],
        ["[a-z]", StringFormat.PATTERN],
        # Semantic versions (including pre-release identifiers).
        ["1.0.0", StringFormat.SEMVER],
        ["1.0.0-alpha", StringFormat.SEMVER],
        ["1.0.0-alpha.1", StringFormat.SEMVER],
        ["1.0.0-0.3.7", StringFormat.SEMVER],
        ["1.0.0-x.7.z.92", StringFormat.SEMVER],
        ["12:15:18", StringFormat.TIME],
        # ISO-8601 durations.
        ["P1W", StringFormat.TIME_DURATION],
        ["PT1H", StringFormat.TIME_DURATION],
        # URIs (scheme not restricted to http/https).
        ["http://foo.com/blah_blah", StringFormat.URI],
        ["spotify://userid:password@example.com", StringFormat.URI],
        ["https://142.42.1.1:8080/", StringFormat.URI],
        ["slack://124435", StringFormat.URI],
        # URLs (http/https), including IDN.
        ["http://foo.com/blah_blah", StringFormat.URL],
        ["http://foo.com/blah_blah/", StringFormat.URL],
        ["https://www.example.com/foo/?bar=baz&inga=42&quux", StringFormat.URL],
        ["http://userid:password@example.com", StringFormat.URL],
        ["http://142.42.1.1:8080/", StringFormat.URL],
        ["http://142.42.1.1/", StringFormat.URL],
        ["http://code.google.com/events/#&product=browser", StringFormat.URL],
        ["http://a.b-c.de", StringFormat.URL],
        ["https://foo_bar.example.com/", StringFormat.URL],
        ["http://jabber.tcp.gmail.com", StringFormat.URL],
        ["http://_jabber._tcp.gmail.com", StringFormat.URL],
        ["http://مثال.إختبار", StringFormat.URL],
        # UUIDs.
        ["cff801a5-5db7-4287-9414-64ba51a9a730", StringFormat.UUID],
        ["ad047288-b643-4cd0-8c79-354f68140bef", StringFormat.UUID],
        ["b11b1836-ad3e-4944-9c80-eaccdac0487b", StringFormat.UUID],
        ["e643c4f2-f9c1-4287-b465-1e02ba7d902d", StringFormat.UUID],
        ["57766d9b-9ea2-4740-9b26-56dfdd79678a", StringFormat.UUID],
    ],
)
def test_pass_valid_format(given_string: str, given_format: str) -> None:
    """Every sample string must be accepted by its declared format."""
    assert validate_string_format(given_string, given_format)
# One deliberately malformed sample per format: validation must raise.
@pytest.mark.parametrize(
    "given_string, given_format",
    [
        ["invalid", StringFormat.BOOLEAN],
        ["invalid", StringFormat.BYTE],
        ["invalid", StringFormat.DATE],
        ["invalid", StringFormat.DATE_TIME],
        ["invalid", StringFormat.DECIMAL],
        ["invalid", StringFormat.EMAIL],
        # Leading underscores are not a valid hostname label here.
        ["__invalid", StringFormat.HOSTNAME],
        ["invalid", StringFormat.IP_ADDRESS],
        ["invalid", StringFormat.IP_ADDRESS_V4],
        ["invalid", StringFormat.IP_ADDRESS_V6],
        # Unterminated character class -> not a compilable regex.
        ["[0-$", StringFormat.PATTERN],
        ["invalid", StringFormat.SEMVER],
        ["invalid", StringFormat.TIME],
        ["invalid", StringFormat.TIME_DURATION],
        ["invalid", StringFormat.URI],
        ["invalid", StringFormat.URL],
        ["invalid", StringFormat.UUID],
    ],
)
def test_fail_invalid_format(given_string: str, given_format: str) -> None:
    """Every malformed sample must raise FormatValidationError."""
    with pytest.raises(FormatValidationError):
        validate_string_format(given_string, given_format)
| 44.787611 | 80 | 0.635052 |
7954b668e79480a54643dfbd4ddfcd96c5b23a76 | 735 | py | Python | src/htc_calculator/config.py | DerMaxxiKing/htc_calculator | 10d2e31a1cb4256fdcbe25ec915d7280927a064a | [
"MIT"
] | null | null | null | src/htc_calculator/config.py | DerMaxxiKing/htc_calculator | 10d2e31a1cb4256fdcbe25ec915d7280927a064a | [
"MIT"
] | null | null | null | src/htc_calculator/config.py | DerMaxxiKing/htc_calculator | 10d2e31a1cb4256fdcbe25ec915d7280927a064a | [
"MIT"
] | null | null | null | import sys
# print('Importing FreeCAD and Modules')
# sys.path.append('/usr/lib/freecad/lib')
# print('Importing FreeCAD and Modules')
#
#
# sys.path.append('/tmp/squashfs-root/usr/lib/python38.zip')
# sys.path.append('/tmp/squashfs-root/usr/lib/python3.8')
# sys.path.append('/tmp/squashfs-root/usr/lib/python3.8/lib-dynload')
# sys.path.append('/tmp/squashfs-root/usr/lib/python3.8/site-packages')
# sys.path.append('/tmp/squashfs-root/usr/lib/')
# sys.path.append('mp/squashfs-root/usr/lib/python3.8/lib-dynload')
# sys.path.append('/tmp/squashfs-root/usr/lib/python3.8/site-packages')
# sys.path.append('mp/squashfs-root/usr/Ext')
# sys.path.append('mp/squashfs-root/usr/lib')
# Working directory for generated/temporary files. POSIX path — presumably
# needs adjusting on Windows; TODO confirm at call sites.
work_dir = '/tmp'
# Requested degree of parallelism (NOTE(review): verify whether consumers
# treat this as process or thread count).
n_proc = 8
| 33.409091 | 72 | 0.702041 |
7954b699a58d4beb3052fbba6bfe02d5d18c2971 | 769 | py | Python | .local/bin/256colours/colortest.py | antonyfg/My_config_files | 28d99b381193bf0641fe2281e07982d85874dde9 | [
"WTFPL"
] | 5 | 2016-02-16T14:39:00.000Z | 2021-06-29T23:36:37.000Z | .local/bin/256colours/colortest.py | antonyfg/My_config_files | 28d99b381193bf0641fe2281e07982d85874dde9 | [
"WTFPL"
] | null | null | null | .local/bin/256colours/colortest.py | antonyfg/My_config_files | 28d99b381193bf0641fe2281e07982d85874dde9 | [
"WTFPL"
] | 1 | 2017-03-01T05:00:39.000Z | 2017-03-01T05:00:39.000Z | #!/usr/bin/env python
# Ported to Python from http://www.vim.org/scripts/script.php?script_id=1349
# NOTE: Python 2 script ("print" statement syntax).
print "Color indexes should be drawn in bold text of the same color."
print
# The 6 intensity levels of the xterm 256-color cube: 0x00, then
# 0x5f + 40*n for n in 0..4 (0x5f, 0x87, 0xaf, 0xd7, 0xff).
colored = [0] + [0x5f + 40 * n for n in range(0, 5)]
# All 6*6*6 = 216 cube entries, rendered as "rr/gg/bb" hex triplets.
colored_palette = [
    "%02x/%02x/%02x" % (r, g, b)
    for r in colored
    for g in colored
    for b in colored
]
# The 24-step grayscale ramp: 0x08 .. 0xee in increments of 10.
grayscale = [0x08 + 10 * n for n in range(0, 24)]
grayscale_palette = [
    "%02x/%02x/%02x" % (a, a, a)
    for a in grayscale
]
# ANSI SGR escapes: "38;5;N" selects 256-color foreground N; "1;" adds bold.
normal = "\033[38;5;%sm"
bold = "\033[1;38;5;%sm"
reset = "\033[0m"
# Palette indexes start at 16 (0-15 are the basic/bright colors).
for (i, color) in enumerate(colored_palette + grayscale_palette, 16):
    index = (bold + "%4s" + reset) % (i, str(i) + ':')
    hex = (normal + "%s" + reset) % (i, color)  # NOTE: shadows builtin hex()
    # Break the line after every 6th entry so cube rows line up.
    newline = '\n' if i % 6 == 3 else ''
    print index, hex, newline,
| 25.633333 | 76 | 0.613784 |
7954b72406bfa07a0ef7d2e41aeebdaa6392ce58 | 5,990 | py | Python | _build/jupyter_execute/join_all_processed.py | harnalashok/credit_risk | 8ad0426d8ac66a115ef6b6feb8a8a05119c1cd9e | [
"Apache-2.0"
] | 1 | 2021-08-03T13:27:10.000Z | 2021-08-03T13:27:10.000Z | _build/jupyter_execute/join_all_processed.py | harnalashok/credit_risk | 8ad0426d8ac66a115ef6b6feb8a8a05119c1cd9e | [
"Apache-2.0"
] | null | null | null | _build/jupyter_execute/join_all_processed.py | harnalashok/credit_risk | 8ad0426d8ac66a115ef6b6feb8a8a05119c1cd9e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8

# # Joining all processed data
# This notebook joins all processed data and then saves it in a file for subsequent modeling.
# Exported from a Jupyter notebook (note the get_ipython() calls below);
# it must run inside IPython, not as a plain script.

# In[87]:

# Last amended: 24th October, 2020
# Myfolder: C:\Users\Administrator\OneDrive\Documents\home_credit_default_risk
# Objective:
#           Solving Kaggle problem: Home Credit Default Risk
#           Joining all processed datasets
#
# Data Source: https://www.kaggle.com/c/home-credit-default-risk/data
# Ref: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features

# In[88]:

# 1.0 Libraries
#     (Some of these may not be needed here.)
get_ipython().run_line_magic('reset', '-f')
import numpy as np
import pandas as pd
import gc

# 1.1 Reduce read data size
#     There is a file reducing.py
#     in this folder. A class
#     in it is used to reduce
#     dataframe size
#     (Code modified by me to
#      exclude 'category' dtype)
import reducing

# 1.2 Misc
import warnings
import os
warnings.simplefilter(action='ignore', category=FutureWarning)

# In[89]:

# 1.3 Pandas display options (wide frames shown in full).
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)

# In[90]:

# 1.4 Display multiple commands outputs from a cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# In[91]:

# 2.0 Prepare to read data (hard-coded Windows path).
pathToData = "C:\\Users\\Administrator\\OneDrive\\Documents\\home_credit_default_risk"
os.chdir(pathToData)

# In[92]:

# 2.1 Some constants
num_rows=None # Implies read all rows
nan_as_category = True # While transforming
                       # 'object' columns to dummies

# In[93]:

# 3.0 Read the main processed application data first
df = pd.read_csv(
    'processed_df.csv.zip',
    nrows = num_rows
)

# 3.0.1 Reduce memory usage by appropriately
#       changing data-types per feature:
df = reducing.Reducer().reduce(df)

# In[94]:

# 3.1 Inline shape/head checks (trailing comments record observed values).
df.shape # (356251, 262)
df.head(2)

# In[95]:

# 3.2 Drop leftover index columns from the CSV round-trip.
df.columns
df.drop(columns = ['Unnamed: 0', 'index'], inplace = True)
df.columns

# In[96]:

# 3.3
df.head(2)

# In[97]:

# 3.4 Set SK_ID_CURR as Index (join key used by every table below).
df = df.set_index('SK_ID_CURR')
df.head(2)
df.shape # (356251, 259)

# In[98]:

# 4.0 Read bureau_agg
bureau_agg = pd.read_csv(
    'processed_bureau_agg.csv.zip',
    nrows = num_rows
)

# 4.0.1 Reduce memory usage by appropriately
#       changing data-types per feature:
bureau_agg = reducing.Reducer().reduce(bureau_agg)

# In[99]:

# 4.1 Set index
bureau_agg.head(2)
bureau_agg = bureau_agg.set_index("SK_ID_CURR")
bureau_agg.head(2)
bureau_agg.shape # (305811, 116)

# In[100]:

# 5.0 Join bureau_agg with df (left join keeps all applications).
df = df.join(
    bureau_agg,
    how='left',
    on='SK_ID_CURR'
)

# In[101]:

# 5.1
df.shape # (356251, 375)
df.head(2)

# In[102]:

# 5.2 Read previous-application aggregates
prev_agg = pd.read_csv(
    'processed_prev_agg.csv.zip',
    nrows = num_rows
)

# 5.2.1 Reduce memory usage by appropriately
#       changing data-types per feature:
prev_agg = reducing.Reducer().reduce(prev_agg)

# In[103]:

# 5.3 Set Index
prev_agg.shape # (338857, 250)
prev_agg.head(2)
prev_agg = prev_agg.set_index("SK_ID_CURR")
prev_agg.head(2)
prev_agg.shape # (338857, 250)

# In[104]:

# 6.0 Join prev_agg with df
df = df.join(prev_agg, how='left', on='SK_ID_CURR')
df.shape # (356251, 624)
df.head(2)

# In[105]:

# 7.0 Read processed POS data
pos_agg = pd.read_csv(
    'processed_pos_agg.csv.zip',
    nrows = num_rows
)

# 7.0.1 Reduce memory usage by appropriately
#       changing data-types per feature:
pos_agg = reducing.Reducer().reduce(pos_agg)

# In[106]:

# 7.1
pos_agg.shape # (337252, 19)
pos_agg.head(2)
pos_agg = pos_agg.set_index("SK_ID_CURR")
pos_agg.head(2)
pos_agg.shape # (337252, 18)

# In[107]:

# 7.2 Join POS with df
df = df.join(
    pos_agg,
    how='left',
    on='SK_ID_CURR'
)
df.shape # (356251, 642)
df.head(2)

# In[108]:

# 8.0 Read processed installments data
ins_agg = pd.read_csv(
    'processed_ins_agg.csv.zip',
    nrows = num_rows
)

# 8.0.1 Reduce memory usage by appropriately
#       changing data-types per feature:
ins_agg = reducing.Reducer().reduce(ins_agg)

# In[109]:

# 8.1 Set index
ins_agg.shape # (339587, 26)
ins_agg.head(2)
ins_agg = ins_agg.set_index("SK_ID_CURR")
ins_agg.head(2)
ins_agg.shape # (339587, 25)

# In[110]:

# 9.0 Join Installments data with df
df = df.join(ins_agg, how='left', on='SK_ID_CURR')
df.shape # (356251, 667)
df.head(2)

# In[111]:

# 10.0 Read Credit card data
cc_agg = pd.read_csv(
    'processed_creditCard_agg.csv.zip',
    nrows = num_rows
)

# 10.0.1 Reduce memory usage by appropriately
#        changing data-types per feature:
cc_agg = reducing.Reducer().reduce(cc_agg)

# In[112]:

# 10.1 Set Index
cc_agg.shape # (103558, 142)
cc_agg.head(2)
cc_agg = cc_agg.set_index("SK_ID_CURR")
cc_agg.head(2)
cc_agg.shape # (103558, 141)

# In[113]:

# 11. Join Credit card data with df
df = df.join(cc_agg, how='left', on='SK_ID_CURR')
df.shape # (356251, 808)
df.head(2)

# In[114]:

# 11.1 Save the results for subsequent use:
df.to_csv("processed_df_joined.csv.zip", compression = "zip")

# In[ ]:

##################
| 18.262195 | 94 | 0.588648 |
7954b75729275e58b4893b6bb42cf52556ad8156 | 2,542 | py | Python | session-1/sess1_gabor_kernel.py | marcreyesph/tf-selfpacedtuts | fd0393ef8578fc65042b1270575a16df6bd30605 | [
"MIT"
] | null | null | null | session-1/sess1_gabor_kernel.py | marcreyesph/tf-selfpacedtuts | fd0393ef8578fc65042b1270575a16df6bd30605 | [
"MIT"
] | null | null | null | session-1/sess1_gabor_kernel.py | marcreyesph/tf-selfpacedtuts | fd0393ef8578fc65042b1270575a16df6bd30605 | [
"MIT"
] | null | null | null | from skimage import data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
# Minimize console warnings by tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

# NOTE(review): TF1-era API (placeholders, InteractiveSession) — this will
# not run under TensorFlow 2 without tf.compat.v1.
sess = tf.InteractiveSession()

# Collect *.jpg paths from the local dataset folder (reads the filesystem).
img_dataset = [os.path.join('celeba_dataset_minified', img_i)
               for img_i in os.listdir('celeba_dataset_minified')
               if '.jpg' in img_i]
img_dataset_read = [plt.imread(img_i)
                    for img_i in img_dataset]
img_data = np.array(img_dataset_read)
# Per-pixel mean/std across the dataset, used to normalize the first image.
img_data_mean = np.mean(img_data, axis=0)
img_data_std = np.std(img_data, axis=0)
img_normalized = ((img_data[0] - img_data_mean) / img_data_std)
"""plt.hist(img_normalized.ravel(), 20)
print(img_normalized.shape)
plt.show()
"""

# The image tensor
img = tf.placeholder(tf.float32, shape=[None, None], name='img')
# Make 2-D to 3-D (HxW) to (HxWx1)
"""tf.expand_dims() takes two parameters, the base tensor and the column where
we want to insert the new dimension
"""
# Insert new dimension to column two [x: y: <here>] cf. [0: 1: 2]
img_3d = tf.expand_dims(img, 2)
dims = img_3d.get_shape()
print(dims)
# Insert new dimension to column zero or the start [<here>: y, z, a] cf. [0: 1: 2: 3]
img_4d = tf.expand_dims(img_3d, 0)
print(img_4d.get_shape().as_list())

# Create placeholders for gabor's params
mean = tf.placeholder(tf.float32, name='mean')
sigma = tf.placeholder(tf.float32, name='sigma')
ksize = tf.placeholder(tf.int32, name='ksize')

# Redo set of operations for creation of gabor kernel
# Linspace over [-3, 3] with ksize sample points.
x = tf.linspace(-3.0, 3.0, ksize)
# Gaussian curve or normal distrib curve (note: 3.1415 approximates pi).
z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                        (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
# 2-D matrix [Nx1] x [1xN] — outer product of the 1-D Gaussian with itself.
z_2d = tf.matmul(
    tf.reshape(z, tf.stack([ksize, 1])),
    tf.reshape(z, tf.stack([1, ksize])))
# Sinusoid along x, tiled across columns to form the carrier wave.
ys = tf.sin(x)
ys = tf.reshape(ys, tf.stack([ksize, 1]))
ones = tf.ones(tf.stack([1, ksize]))
# Repeatedly multiply one to ys
wave = tf.matmul(ys, ones)
# Gabor kernel = sinusoid modulated by the 2-D Gaussian envelope.
gabor = tf.multiply(wave, z_2d)
# conv2d expects filters as [height, width, in_channels, out_channels].
gabor_4d = tf.reshape(gabor, tf.stack([ksize, ksize, 1, 1]))

# The convolution part takes a little longer time to compile
# Convolve the two
convolved = tf.nn.conv2d(img_4d, gabor_4d, strides=[1, 1, 1, 1], padding='SAME', name='convolved')
convolved_img = convolved[0, :, :, 0]

# Show result: run the graph on skimage's test camera image.
result = convolved_img.eval(feed_dict={
    img: data.camera(),
    mean: 0.0,
    sigma: 1.0,
    ksize: 100
})
plt.imshow(result, cmap='gray')
plt.show() | 30.626506 | 99 | 0.669158 |
7954b9308ef97874ef7051f8d3df44ecad9ebf05 | 845 | py | Python | endaccgen.py | alexgonzdev/endaccountgen | 4563e08b18c57502475d729da4ff7e461bb383aa | [
"MIT"
] | 1 | 2020-10-11T15:54:33.000Z | 2020-10-11T15:54:33.000Z | endaccgen.py | alexgonzdev/endaccountgen | 4563e08b18c57502475d729da4ff7e461bb383aa | [
"MIT"
] | 1 | 2021-07-01T01:40:16.000Z | 2021-07-01T01:40:16.000Z | endaccgen.py | alexgonzdev/endaccountgen | 4563e08b18c57502475d729da4ff7e461bb383aa | [
"MIT"
] | null | null | null | import requests
import json
# Static, browser-like request headers sent with the registration call.
headers = {
    "Accept":"application/json",
    "Accept-Encoding":"gzip, deflate, br",
    "Accept-Language":"en-US,en;q=0.5",
    "Access-Control-Allow-Credentials":"true",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    "Content-Type":"application/json; charset=UTF-8",
    "Host":"launches-api.endclothing.com",
    "Origin":"https://launches.endclothing.com",
    "Pragma":"no-cache",
    "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:74.0) Gecko/20100101 Firefox/74.0"
}
# Account-registration endpoint and a hard-coded registration payload.
registerurl = "https://launches-api.endclothing.com/api/account"
registerpayload = {"email":"jfjwaoijoiajio@gmail.com","firstName":"xacjwdiaji","lastName":"vhdihviuwq","password":"xncihiH38","websiteId":1,"optIn":0}
session = requests.Session()
# Perform the POST (network side effect) and echo the raw response body.
register = session.post(registerurl, json=registerpayload, headers=headers)
print(register.text)
| 31.296296 | 150 | 0.733728 |
7954b939ddc6ce3adf699e9ba779bb3d9f59ef59 | 449 | py | Python | 001-100/12/12.py | junwei-wang/project-euler | abd728037e0f73f0f52f2ae4d40d468b307f34de | [
"MIT"
] | null | null | null | 001-100/12/12.py | junwei-wang/project-euler | abd728037e0f73f0f52f2ae4d40d468b307f34de | [
"MIT"
] | null | null | null | 001-100/12/12.py | junwei-wang/project-euler | abd728037e0f73f0f52f2ae4d40d468b307f34de | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import sqrt, ceil
def get_fac_num(n):
    """Return the number of positive divisors of n (n >= 1).

    Trial-divides only up to the integer square root: every divisor
    i <= sqrt(n) pairs with the cofactor n / i >= sqrt(n).
    """
    if n == 1:
        return 1
    # Exact integer square root: start from the float estimate and correct
    # it in both directions, so large perfect squares cannot be miscounted
    # by floating-point rounding (the previous int(ceil(sqrt(n))) could
    # double-count the root when sqrt() rounded the wrong way).
    root = int(sqrt(n))
    while root * root > n:
        root -= 1
    while (root + 1) * (root + 1) <= n:
        root += 1
    count = 0  # renamed from 'sum', which shadowed the builtin
    for i in range(1, root + 1):
        if n % i == 0:
            count += 2  # counts both i and its partner n / i
    if root * root == n:
        count -= 1  # perfect square: the root paired with itself
    return count
# Search the triangular numbers tri = i*(i+1)/2 for the first one with more
# than 500 divisors (Project Euler problem 12).
# NOTE: Python 2 code — "print" statement, and "/" here is integer division
# because i*(i+1) is always even.
i = 10
while True:
    tri = i * (i+1) / 2
    fac_num = get_fac_num(tri)
    if fac_num > 500:
        print i, tri, fac_num
        break
    i += 1
| 16.62963 | 30 | 0.474388 |
7954b9a1b9f65d17bf20213cdeec160c1876d8c0 | 85 | py | Python | setup.py | isidentical/pynano | 29ece35782cacfb3cfcf35bf9b50e6a11cf44d34 | [
"MIT"
] | 2 | 2019-09-15T16:26:32.000Z | 2019-09-15T17:20:40.000Z | setup.py | pynano/pynano | 29ece35782cacfb3cfcf35bf9b50e6a11cf44d34 | [
"MIT"
] | null | null | null | setup.py | pynano/pynano | 29ece35782cacfb3cfcf35bf9b50e6a11cf44d34 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Minimal distribution metadata; "a0" marks a pre-release (alpha) version.
# NOTE(review): find_packages is imported above but not passed here — so no
# packages are declared; confirm this is intentional.
setup(name="pynano", version="0.1.0a0")
| 21.25 | 43 | 0.764706 |
7954ba3ba50c3cf20c401fc4afebfc92242c7879 | 4,104 | py | Python | example_problems/tutorial/chococroc/services/check_game_value.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 4 | 2021-06-27T13:27:24.000Z | 2022-03-24T10:46:28.000Z | example_problems/tutorial/chococroc/services/check_game_value.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | null | null | null | example_problems/tutorial/chococroc/services/check_game_value.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 5 | 2021-04-01T15:21:57.000Z | 2022-01-29T15:07:38.000Z | #!/usr/bin/env python3
import sys
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
import chococroc_lib as cl
# METADATA OF THIS TAL_SERVICE:
problem="chococroc"
service="check_game_value"
# Command-line arguments: board size m x n, the conjectured value, and a
# flag that suppresses output on correct conjectures.
# By convention below: value == -2 conjectures "winning", value == -1
# conjectures "lost", value >= 0 conjectures the exact Grundy value.
args_list = [
    ('m',int),
    ('n',int),
    ('value',int),
    ('silent',bool)
]

ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))

# START CODING YOUR SERVICE:
# Reference Grundy value computed by the problem library.
grundy_val = cl.grundy_val(ENV['m'], ENV['n'])
#print(f"grundy_val={grundy_val}")

# Conjecture "winning configuration": true iff the Grundy value is nonzero.
if ENV['value'] == -2:
    if grundy_val == 0:
        TAc.NO()
        TAc.print(LANG.render_feedback("not-a-winning-conf", f'Contrary to your conjecture, the configuration {ENV["m"]} x {ENV["n"]} is NOT a winning one.'), "red")
        TAc.print(LANG.render_feedback("not-a-winning-conf-wanna-play", f'You can check this out playing a game against our service \'play\', starting first on configuration {ENV["m"]} x {ENV["n"]}. If you succeed winning then you disprove our claim or the optimality of our player (either way, let us know).'), "yellow", ["bold"])
    elif not ENV['silent']:
        TAc.OK()
        TAc.print(LANG.render_feedback("ok-winning-conf", f'We agree with your conjecture that the configuration {ENV["m"]} x {ENV["n"]} is a winning one.'), "green", ["bold"])

# Conjecture "lost configuration": true iff the Grundy value is zero.
if ENV['value'] == -1:
    if grundy_val != 0:
        TAc.NO()
        TAc.print(LANG.render_feedback("not-a-lost-conf", f'Contrary to your conjecture, the configuration {ENV["m"]} x {ENV["n"]} is NOT a lost one.'), "red")
        TAc.print(LANG.render_feedback("not-a-lost-conf-wanna-play", f'You can check this out playing a game against our service \'play\', playing as second a game starting from configuration {ENV["m"]} x {ENV["n"]}. If you succeed winning then you disprove our claim or the optimality of our player (either way, let us know).'), "yellow", ["bold"])
    elif not ENV['silent']:
        TAc.OK()
        TAc.print(LANG.render_feedback("ok-lost-conf", f'We agree with your conjecture that the configuration {ENV["m"]} x {ENV["n"]} is a lost one.'), "green", ["bold"])

# Conjecture of the exact Grundy value; on mismatch the feedback depends on
# whether either side of the comparison is zero.
if ENV['value'] >= 0:
    if grundy_val != ENV['value']:
        TAc.NO()
        TAc.print(LANG.render_feedback("wrong-grundy-val", f'Contrary to your conjecture, the grundy value of configuration {ENV["m"]} x {ENV["n"]} is NOT {ENV["value"]}.'), "red")
        if grundy_val * ENV['value'] != 0:
            # Both nonzero: offer the NIM-tower measuring game.
            TAc.print(LANG.render_feedback("wrong-grundy-val-play", f'You can check this out playing a game against our service \'play_val_measuring_game\', starting second on configuration (chocolate_bar={ENV["m"]} x {ENV["n"]}, single_NIM_tower={ENV["value"]}). If you succeed winning then you disprove our claim or the optimality of our player (either way, let us know).'), "yellow", ["bold"])
        elif grundy_val == 0:
            # Conjectured nonzero but the configuration is actually lost.
            TAc.print(LANG.render_feedback("not-a-winning-conf", f'Contrary to your conjecture, the configuration {ENV["m"]} x {ENV["n"]} is NOT a winning one.'), "red")
            TAc.print(LANG.render_feedback("not-a-winning-conf-wanna-play", f'You can check this out playing a game against our service \'play\', starting first on configuration {ENV["m"]} x {ENV["n"]}. If you succeed winning then you disprove our claim or the optimality of our player (either way, let us know).'), "yellow", ["bold"])
        else:
            # Conjectured zero but the configuration is actually winning.
            TAc.print(LANG.render_feedback("not-a-lost-conf", f'Contrary to your conjecture, the configuration {ENV["m"]} x {ENV["n"]} is NOT a lost one.'), "red")
            TAc.print(LANG.render_feedback("not-a-lost-conf-wanna-play", f'You can check this out playing a game against our service \'play\', playing as second a game starting from configuration {ENV["m"]} x {ENV["n"]}. If you succeed winning then you disprove our claim or the optimality of our player (either way, let us know).'), "yellow", ["bold"])
    elif not ENV['silent']:
        TAc.OK()
        TAc.print(LANG.render_feedback("ok-grundy-val", f'We agree with your conjecture that the configuration {ENV["m"]} x {ENV["n"]} has grundy value {grundy_val}.'), "green", ["bold"])
exit(0)
| 66.193548 | 396 | 0.664717 |
7954babebec1f97f71efac501a2734bc187f2b78 | 8,437 | py | Python | tests/fixtures.py | jamilatta/citedby | b0a7146cc1d0972da678d934d93f92d68c8eb126 | [
"BSD-2-Clause"
] | null | null | null | tests/fixtures.py | jamilatta/citedby | b0a7146cc1d0972da678d934d93f92d68c8eb126 | [
"BSD-2-Clause"
] | null | null | null | tests/fixtures.py | jamilatta/citedby | b0a7146cc1d0972da678d934d93f92d68c8eb126 | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
# Fixture: canned CrossRef-style DOI search response (one hit), used by the
# test suite in place of a live lookup.
doi_response = [
    {
        "doi": u"http://dx.doi.org/10.1161/01.res.59.2.178",
        "score": 18.42057,
        "normalizedScore": 100,
        "title": u"Power spectral analysis of heart rate and arterial pressure variabilities as a marker of sympatho-vagal interaction in man and conscious dog",
        "fullCitation": u"M. Pagani, F. Lombardi, S. Guzzetti, O. Rimoldi, R. Furlan, P. Pizzinelli, G. Sandrone, G. Malfatto, S. Dell'Orto, E. Piccaluga, 1986, 'Power spectral analysis of heart rate and arterial pressure variabilities as a marker of sympatho-vagal interaction in man and conscious dog', <i>Circulation Research</i>, vol. 59, no. 2, pp. 178-193",
        "coins": u"ctx_ver=Z39.88-2004&rft_id=info%3Adoi%2Fhttp%3A%2F%2Fdx.doi.org%2F10.1161%2F01.res.59.2.178&rfr_id=info%3Asid%2Fcrossref.org%3Asearch&rft.atitle=Power+spectral+analysis+of+heart+rate+and+arterial+pressure+variabilities+as+a+marker+of+sympatho-vagal+interaction+in+man+and+conscious+dog&rft.jtitle=Circulation+Research&rft.date=1986&rft.volume=59&rft.issue=2&rft.spage=178&rft.epage=193&rft.aufirst=M.&rft.aulast=Pagani&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.au=M.+Pagani&rft.au=+F.+Lombardi&rft.au=+S.+Guzzetti&rft.au=+O.+Rimoldi&rft.au=+R.+Furlan&rft.au=+P.+Pizzinelli&rft.au=+G.+Sandrone&rft.au=+G.+Malfatto&rft.au=+S.+Dell%27Orto&rft.au=+E.+Piccaluga",
        "year": u"1986"
    }
]
# Fixture: a single SciELO/ISIS-style article record. Keys like "v10"
# (authors), "v12" (titles), "v65" (date), "v70" (affiliations) follow the
# ISIS field-tag convention used throughout these fixtures.
article = {
    "code": "S0101-31222002000100038",
    "title": {
        "v35": [
            {"_": u"PRINT"}
        ],
        "v935": [
            {"_": u"0101-3122"}
        ],
        "v400": [
            {"_": u"0101-3122"}
        ],
        "v901": [
            {"l": "es", "_": u"Publicar trabajos científicos originales en Ciencia y Tecnología de Semillas, con los avances del conocimiento en la disciplina, para promover la agricultura brasileia e internacional."},
            {"l": "pt", "_": u"Publicar trabalhos originais de contribuição científica, em Ciência e Tecnologia de Sementes, divulgando ao setor agrícola nacional e internacional , avanços do conhecimento nessa área."},
            {"l": "en", "_": u"Publish original scientific studies in Seed Science and Technology, promote Brazilian and international agriculture and the advances in these areas."}
        ],
        "v100": [
            {"_": "Revista Brasileira de Sementes"}
        ],
        "v690": [
            {"_": u"www.scielo.br"}
        ],
    },
    "article": {
        "v40": [
            {"_": "pt"}
        ],
        "v12": [
            {"l": "pt", "_": u"Estratégias de luta das enfermeiras da Maternidade Leila Diniz para implantação de um modelo humanizado de assistência ao parto"},
            {"l": "en", "_": u"Nursing fighting strategies in the Leila Diniz Maternity towards the implantation of a humanized model for delivery care"},
            {"l": "es", "_": u"Estrategias de lucha de las enfermeras de la Maternidad Leila Diniz para la implantación de un modelo humanizado de asistencia al parto"}
        ],
        "v880": [{"_": "S0101-31222002000100038"}],
        "v65": [{"_": "20091200"}],
        "v10": [
            {u'1': u'A01', u's': u'Progianti', u'r': u'ND', u'_': u'', u'n': u'Jane Márcia'},
            {u's': u'Porfírio', u'r': u'ND', u'_': u'', u'n': u'Aline Bastos'},
            {u'1': u'A02', u's': u'Pereira', u'r': u'ND', u'_': u'', u'n': u'Adriana Lenho de Figueiredo'}
        ],
        "v70": [
            {u'e': u'jmprogi@uol.com.br', u'i': u'A01', u'1': u'Faculdade de Enfermagem', u'p': u'BRAZIL', u's': u'Rio de Janeiro', u'_': u'Universidade do Estado do Rio de Janeiro'},
            {u'e': u'adrianalenho.uerj@gmail.com', u'i': u'A02', u'1': u'Faculdade de Enfermagem', u'p': u'BRAZIL', u's': u'Rio de Janeiro', u'2': u'Departamento de Enfermagem Materno-Infantil', u'_': u'Universidade do Estado do Rio de Janeiro'}
        ],
    }
}
# Fixture: two article records wrapped with their journal "title" metadata
# and collection code, mirroring the structure the indexer consumes.
articles = [
    {
        "code": u"S0104-07072013000100023",
        "title": {
            "v35": [
                {"_": u"PRINT"}
            ],
            "v935": [
                {"_": u"0104-0707"}
            ],
            "v150": [
                {"_": u"Texto contexto - enferm."}
            ],
            "v151": [
                {"_": u"Texto contexto - enferm"}
            ],
            "v400": [
                {"_": u"0104-0707"}
            ],
            "v690": [
                {"_": u"www.scielo.br"}
            ],
            "v100": [
                {"_": u"Texto & Contexto - Enfermagem"}
            ],
        },
        "collection": "scl",
        "article": {
            "v35": [
                {"_": u"0104-0707"}
            ],
            "v10": [
                {u'1': u'A01', u's': u'Progianti', u'r': u'ND', u'_': u'', u'n': u'Jane Márcia'},
                {u's': u'Porfírio', u'r': u'ND', u'_': u'', u'n': u'Aline Bastos'},
                {u'1': u'A02', u's': u'Pereira', u'r': u'ND', u'_': u'', u'n': u'Adriana Lenho de Figueiredo'}
            ],
            "v70": [
                {u'e': u'jmprogi@uol.com.br', u'i': u'A01', u'1': u'Faculdade de Enfermagem', u'p': u'BRAZIL', u's': u'Rio de Janeiro', u'_': u'Universidade do Estado do Rio de Janeiro'},
                {u'e': u'adrianalenho.uerj@gmail.com', u'i': u'A02', u'1': u'Faculdade de Enfermagem', u'p': u'BRAZIL', u's': u'Rio de Janeiro', u'2': u'Departamento de Enfermagem Materno-Infantil', u'_': u'Universidade do Estado do Rio de Janeiro'}
            ],
            "v880": [
                {"_": u"S0104-07072013000100023"}
            ],
            "v40": [
                {"_": u"en"}
            ],
            "v12": [
                {u'l': u'pt', u'_': u'title pt'},
                {u'l': u'en', u'_': u'title en'},
                {u'l': u'es', u'_': u'title es'}
            ],
            "v83": [
                {u'a': u'abstract pt', u'l': u'pt', u'_': u''},
                {u'a': u'abstract en', u'l': u'en', u'_': u''},
                {u'a': u'abstract es', u'l': u'es', u'_': u''}
            ],
            "v65": [
                {"_": "20091200"}
            ],
            "doi": u"10.1590/S0104-07072013000100023",
        }
    },
    {
        "code": u"S1414-81452012000300003",
        "title": {
            "v35": [
                {"_": u"PRINT"}
            ],
            "v935": [
                {"_": u"1414-8145"}
            ],
            "v150": [
                {"_": u"Esc. Anna Nery"}
            ],
            "v151": [
                {"_": u"Esc. Anna Nery"}
            ],
            "v400": [
                {"_": u"1414-8145"}
            ],
            "v690": [
                {"_": u"www.scielo.br"}
            ],
            "v100": [
                {"_": u"Escola Anna Nery"}
            ],
        },
        "collection": "scl",
        "article": {
            "v35": [
                {"_": u"1414-8145"}
            ],
            "v10": [
                {u'1': u'A01 A02 A03', u's': u'Progianti', u'r': u'ND', u'_': u'', u'n': u'Jane Márcia'},
                {u's': u'Porfírio', u'r': u'ND', u'_': u'', u'n': u'Aline Bastos'}
            ],
            "v70": [
                {u'i': u'A01', u'1': u'Faculdade de Enfermagem', u'_': u'Universidade do Estado do Rio de Janeiro'},
                {u'i': u'A02', u'_': u'Universidade do Estado do Rio de Janeiro'},
                {u'c': u'Rio de Janeiro', u'e': u'jmprogi@uol.com.br', u'i': u'A03', u'1': u'Faculdade de Enfermagem', u'p': u'BRAZIL', u's': u'RJ', u'2': u'Grupo de Pesquisas sobre Gênero, Poder e Violêcia na Saúde e Enfermagem', u'_': u'Universidade do Estado do Rio de Janeiro'}
            ],
            "v880": [
                {"_": u"S1414-81452012000300003"}
            ],
            "v40": [
                {"_": u"pt"}
            ],
            "v12": [
                {"l": "pt", "_": u"title pt"},
                {"l": "en", "_": u"title en"},
                {"l": "es", "_": u"title es"}
            ],
            "v83": [
                {"a": u"abstract pt", "l": "pt", "_": ""},
                {"a": u"abstract en", "l": "en", "_": ""},
                {"a": u"abstract es", "l": "es", "_": ""}
            ],
            "v65": [
                {"_": u"20120900"}
            ],
            "doi": u"10.1590/S1414-81452012000300003",
        }
    }
]
| 42.611111 | 796 | 0.47268 |
7954bb0f49177fb632a32d2075e90cfa8c5d888f | 1,592 | py | Python | fec/home/migrations/0039_legalresourceslanding.py | rds0751/fec-cms | 833cdac7240d056ed234ed5b503a2407e1fee1ce | [
"CC0-1.0"
] | 47 | 2015-09-09T14:23:30.000Z | 2019-12-29T13:58:41.000Z | fec/home/migrations/0039_legalresourceslanding.py | rds0751/fec-cms | 833cdac7240d056ed234ed5b503a2407e1fee1ce | [
"CC0-1.0"
] | 1,634 | 2015-08-19T16:36:28.000Z | 2018-03-09T18:22:23.000Z | fec/home/migrations/0039_legalresourceslanding.py | rds0751/fec-cms | 833cdac7240d056ed234ed5b503a2407e1fee1ce | [
"CC0-1.0"
] | 27 | 2015-08-20T02:10:13.000Z | 2021-02-14T10:51:18.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 23:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.contrib.table_block.blocks
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.10.4, see header comment): creates
    # the LegalResourcesLanding wagtail page type. Migration files are part
    # of the recorded migration graph -- avoid editing operations by hand.
    dependencies = [
        ('wagtailcore', '0029_unicode_slugfield_dj19'),
        ('wagtailimages', '0013_make_rendition_upload_callable'),
        ('home', '0038_resourcepage'),
    ]
    operations = [
        migrations.CreateModel(
            name='LegalResourcesLanding',
            fields=[
                # Multi-table-inheritance link to the wagtail Page row.
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                # Free-form StreamField body: heading/paragraph/html/image/table blocks.
                ('body', wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('html', wagtail.wagtailcore.blocks.RawHTMLBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('table', wagtail.contrib.table_block.blocks.TableBlock())), blank=True, null=True)),
                # Optional feed thumbnail; the page survives image deletion (SET_NULL).
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page', models.Model),
        ),
    ]
| 45.485714 | 398 | 0.682789 |
7954bbbd629a47f76ebeb04b1dacc89378ebee51 | 1,028 | py | Python | sample/diff_tile_metadata_yaml.py | RedisLabs/tile-generator | 56b602334edb38639bc7e01b1e9e68e43f9e6828 | [
"Apache-2.0"
] | null | null | null | sample/diff_tile_metadata_yaml.py | RedisLabs/tile-generator | 56b602334edb38639bc7e01b1e9e68e43f9e6828 | [
"Apache-2.0"
] | null | null | null | sample/diff_tile_metadata_yaml.py | RedisLabs/tile-generator | 56b602334edb38639bc7e01b1e9e68e43f9e6828 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Compare two tile-metadata YAML files after normalizing volatile fields
# (test-tile release version/file, product versions) and sorting lists so
# that only meaningful differences remain.
# NOTE(review): Python 2 script (print statements) -- run under python2.
import dictdiffer
import sys
import yaml
if len(sys.argv) != 3:
	print 'Usage: %s FILE_A FILE_B\nSort tile metadata yaml files and compare FILE_A to FILE_B.' % sys.argv[0].split('/')[-1]
	sys.exit(1)
# Read both files entirely into memory.
with open(sys.argv[1], 'r') as f:
	a = f.read()
with open(sys.argv[2], 'r') as f:
	b = f.read()
# SECURITY NOTE(review): yaml.load without an explicit Loader can construct
# arbitrary Python objects via YAML tags; prefer yaml.safe_load unless these
# files are fully trusted (they appear to be local build artifacts -- verify).
aa = yaml.load(a)
bb = yaml.load(b)
# Normalize both documents in place before diffing.
for conf in [aa, bb]:
	for release in conf['releases']:
		if release.get('name') == 'test-tile':
			release.pop('version')
			release.pop('file')
	conf.pop('product_version')
	conf.pop('provides_product_versions')
	conf['property_blueprints'] = sorted(conf['property_blueprints'], key=lambda k: k['name'])
	conf['job_types'] = sorted(conf['job_types'], key=lambda k: k['name'])
	# Manifests may be embedded dicts or YAML strings; sort either form.
	for x in conf['job_types']: x['manifest'] = sorted(x['manifest'].items()) if type(x['manifest']) is dict else sorted(yaml.load(x['manifest']).items())
from pprint import pprint
for x in list(dictdiffer.diff(aa,bb)): pprint(x); print ''
| 30.235294 | 154 | 0.63716 |
7954bd748326eb667db0d4c4fed66ea50c1593cd | 282 | py | Python | easy_dict/tests/test_05_sets.py | cahoy/NestedDictionary | 881f0ea8af36a60fcd1b9d7a84b1aec4cd7072b2 | [
"MIT"
] | 2 | 2021-02-13T03:58:59.000Z | 2021-09-15T01:02:12.000Z | easy_dict/tests/test_05_sets.py | cahoy/NestedDictionary | 881f0ea8af36a60fcd1b9d7a84b1aec4cd7072b2 | [
"MIT"
] | 1 | 2017-01-12T00:18:38.000Z | 2017-01-12T00:18:38.000Z | easy_dict/tests/test_05_sets.py | cahoy/NestedDictionary | 881f0ea8af36a60fcd1b9d7a84b1aec4cd7072b2 | [
"MIT"
] | 1 | 2022-02-16T18:28:52.000Z | 2022-02-16T18:28:52.000Z | from pytest import fixture
import easy_dict as nd
@fixture()
def n():
    """Provide a fresh, empty NestedDict for each test that requests it."""
    empty = nd.NestedDict()
    return empty
def test_set_pair_deep_keys_with_bracket(n):
    """Chained bracket assignment on a NestedDict builds the expected mapping."""
    # The exact statement sequence is the behavior under test: plain dicts
    # are assigned first, then a deep key is set through chained brackets.
    n['a'] = {}
    n['a']['b'] = {}
    n['a']['b']['c'] = 123
    n['d'] = None
    assert n == {'a': {'b': {'c': 123}}, 'd': None}
| 15.666667 | 51 | 0.521277 |
7954be248a7b1a4e57baaae1e47f3a97a3ed865e | 15,056 | py | Python | tensorflow_data_validation/utils/display_util.py | aaltay/data-validation | 14773d06aa673f7f525d76bb785887e51d920a6f | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/utils/display_util.py | aaltay/data-validation | 14773d06aa673f7f525d76bb785887e51d920a6f | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/utils/display_util.py | aaltay/data-validation | 14773d06aa673f7f525d76bb785887e51d920a6f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for example notebooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
from typing import List, Optional, Text
import pandas as pd
from tensorflow_data_validation import types
from tensorflow_data_validation.utils import stats_util
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
try:
# pylint: disable=g-import-not-at-top
from IPython.display import display
from IPython.display import HTML
except ImportError as e:
raise ImportError('To use visualization features, make sure ipython is '
'installed, or install TFDV using "pip install '
'tensorflow-data-validation[visualization]": {}'.format(e))
def _add_quotes(input_str: types.FeatureName) -> types.FeatureName:
return "'" + input_str.replace("'", "\\'") + "'"
def display_schema(schema: schema_pb2.Schema) -> None:
  """Displays the input schema.
  Renders one DataFrame with a row per feature (type, presence, valency,
  domain) and, if any string domains exist, a second DataFrame listing the
  values of each domain.
  Args:
    schema: A Schema protocol buffer.
  Raises:
    TypeError: If the input is not a Schema proto.
  """
  if not isinstance(schema, schema_pb2.Schema):
    raise TypeError('schema is of type %s, should be a Schema proto.' %
                    type(schema).__name__)
  # Extract all the string domains at the schema level.
  domain_rows = []
  for domain in schema.string_domain:
    domain_rows.append(
        [_add_quotes(domain.name),
         ', '.join(_add_quotes(v) for v in domain.value)])
  feature_rows = []
  # Iterate over the features in the schema and extract the properties of each
  # feature.
  for feature in schema.feature:
    # Extract the presence information of the feature.
    if feature.HasField('presence'):
      if feature.presence.min_fraction == 1.0:
        feature_presence = 'required'
      else:
        feature_presence = 'optional'
    else:
      feature_presence = ''
    # Extract the valency information of the feature.
    valency = ''
    if feature.HasField('value_count'):
      if (feature.value_count.min == feature.value_count.max and
          feature.value_count.min == 1):
        valency = 'single'
      else:
        # Render an interval string; missing bounds default to 0 / infinity.
        min_value_count = ('[%d' % feature.value_count.min
                           if feature.value_count.HasField('min') else '[0')
        max_value_count = ('%d]' % feature.value_count.max
                           if feature.value_count.HasField('max') else 'inf)')
        valency = min_value_count + ',' + max_value_count
    # Extract the feature type.
    feature_type = schema_pb2.FeatureType.Name(feature.type)
    # If the feature has a string domain, treat it as a string feature.
    if feature_type == 'BYTES' and (feature.HasField('domain') or
                                    feature.HasField('string_domain')):
      feature_type = 'STRING'
    # Extract the domain (if any) of the feature.
    domain = '-'
    if feature.HasField('domain'):
      domain = _add_quotes(feature.domain)
    elif feature.HasField('int_domain'):
      left_value = ('[%d' % feature.int_domain.min
                    if feature.int_domain.HasField('min') else '(-inf')
      right_value = ('%d]' % feature.int_domain.max
                     if feature.int_domain.HasField('max') else 'inf)')
      domain = left_value + ',' + right_value
    elif feature.HasField('float_domain'):
      left_value = ('[%f' % feature.float_domain.min
                    if feature.float_domain.HasField('min') else '(-inf')
      right_value = ('%f]' % feature.float_domain.max
                     if feature.float_domain.HasField('max') else 'inf)')
      domain = left_value + ',' + right_value
    elif feature.HasField('string_domain'):
      # Feature-level string domains also get a row in the domain table.
      domain = _add_quotes(feature.string_domain.name if
                           feature.string_domain.name else
                           feature.name + '_domain')
      domain_rows.append([domain,
                          ', '.join(_add_quotes(v) for v in
                                    feature.string_domain.value)])
    feature_rows.append(
        [_add_quotes(feature.name), feature_type, feature_presence, valency,
         domain])
  # Construct a DataFrame consisting of the properties of the features
  # and display it.
  features = pd.DataFrame(
      feature_rows,
      columns=['Feature name', 'Type', 'Presence', 'Valency',
               'Domain']).set_index('Feature name')
  display(features)
  # Construct a DataFrame consisting of the domain values and display it.
  if domain_rows:
    domains = pd.DataFrame(
        domain_rows, columns=['Domain',
                              'Values']).set_index('Domain')
    # Do not truncate columns.
    # NOTE(review): max_colwidth=-1 is deprecated in pandas >= 1.0 (use None).
    pd.set_option('max_colwidth', -1)
    display(domains)
def display_anomalies(anomalies: anomalies_pb2.Anomalies) -> None:
  """Displays the input anomalies.
  Renders a DataFrame with one row per anomalous feature (plus a synthetic
  '[dataset anomaly]' row for dataset-level anomalies), or a green
  "No anomalies found." banner when the proto is clean.
  Args:
    anomalies: An Anomalies protocol buffer.
  Raises:
    TypeError: If the input is not an Anomalies proto.
  """
  if not isinstance(anomalies, anomalies_pb2.Anomalies):
    raise TypeError('anomalies is of type %s, should be an Anomalies proto.' %
                    type(anomalies).__name__)
  anomaly_rows = []
  for feature_name, anomaly_info in anomalies.anomaly_info.items():
    anomaly_rows.append([
        _add_quotes(feature_name), anomaly_info.short_description,
        anomaly_info.description
    ])
  if anomalies.HasField('dataset_anomaly_info'):
    # Dataset-wide anomalies get a fixed pseudo feature name.
    anomaly_rows.append([
        '[dataset anomaly]', anomalies.dataset_anomaly_info.short_description,
        anomalies.dataset_anomaly_info.description
    ])
  if not anomaly_rows:
    display(HTML('<h4 style="color:green;">No anomalies found.</h4>'))
  else:
    # Construct a DataFrame consisting of the anomalies and display it.
    anomalies_df = pd.DataFrame(
        anomaly_rows,
        columns=['Feature name', 'Anomaly short description',
                 'Anomaly long description']).set_index('Feature name')
    # Do not truncate columns.
    # NOTE(review): max_colwidth=-1 is deprecated in pandas >= 1.0 (use None).
    pd.set_option('max_colwidth', -1)
    display(anomalies_df)
def _project_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList,
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Return `statistics` restricted by an allowlist or denylist of features.
  With neither list given, the input proto itself is returned. Otherwise a
  new proto is built in which each dataset keeps only the features selected
  by the allowlist (or, failing that, not excluded by the denylist).
  """
  if allowlist_features is None and denylist_features is None:
    return statistics
  # Allowlist wins when both are provided (mirrors the original branch order).
  keep_listed = allowlist_features is not None
  listed = set(allowlist_features if keep_listed else denylist_features)
  projected = statistics_pb2.DatasetFeatureStatisticsList()
  for dataset_stats in statistics.datasets:
    projected_dataset = projected.datasets.add()
    projected_dataset.MergeFrom(dataset_stats)
    del projected_dataset.features[:]
    for feature in dataset_stats.features:
      is_listed = types.FeaturePath.from_proto(feature.path) in listed
      # Keep listed features in allowlist mode, unlisted ones in denylist mode.
      if is_listed == keep_listed:
        projected_dataset.features.add().MergeFrom(feature)
  return projected
def _get_combined_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Get combined dataset statistics list proto.
  Projects both inputs onto the requested features and merges them into one
  DatasetFeatureStatisticsList with one (or two) named datasets. Names set
  inside the protos take precedence over the lhs_name/rhs_name arguments;
  if both sides end up with the same name, the defaults are restored.
  Raises:
    TypeError: If either input is not a DatasetFeatureStatisticsList proto.
    ValueError: If either input contains more than one dataset.
  """
  if not isinstance(lhs_statistics,
                    statistics_pb2.DatasetFeatureStatisticsList):
    raise TypeError(
        'lhs_statistics is of type %s, should be '
        'a DatasetFeatureStatisticsList proto.' % type(lhs_statistics).__name__)
  if len(lhs_statistics.datasets) != 1:
    raise ValueError('lhs_statistics proto contains multiple datasets. Only '
                     'one dataset is currently supported.')
  if lhs_statistics.datasets[0].name:
    lhs_name = lhs_statistics.datasets[0].name
  # Add lhs stats.
  lhs_statistics = _project_statistics(
      lhs_statistics, allowlist_features, denylist_features)
  combined_statistics = statistics_pb2.DatasetFeatureStatisticsList()
  lhs_stats_copy = combined_statistics.datasets.add()
  lhs_stats_copy.MergeFrom(lhs_statistics.datasets[0])
  if rhs_statistics is not None:
    if not isinstance(rhs_statistics,
                      statistics_pb2.DatasetFeatureStatisticsList):
      raise TypeError('rhs_statistics is of type %s, should be a '
                      'DatasetFeatureStatisticsList proto.'
                      % type(rhs_statistics).__name__)
    if len(rhs_statistics.datasets) != 1:
      raise ValueError('rhs_statistics proto contains multiple datasets. Only '
                       'one dataset is currently supported.')
    if rhs_statistics.datasets[0].name:
      rhs_name = rhs_statistics.datasets[0].name
    # If we have same name, revert to default names.
    if lhs_name == rhs_name:
      lhs_name, rhs_name = 'lhs_statistics', 'rhs_statistics'
    # Add rhs stats.
    rhs_statistics = _project_statistics(
        rhs_statistics, allowlist_features, denylist_features)
    rhs_stats_copy = combined_statistics.datasets.add()
    rhs_stats_copy.MergeFrom(rhs_statistics.datasets[0])
    rhs_stats_copy.name = rhs_name
  # Update lhs name (after possible collision fallback above).
  lhs_stats_copy.name = lhs_name
  return combined_statistics
def get_statistics_html(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Text:
  """Build the HTML for visualizing the input statistics using Facets.
  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.
  Returns:
    HTML to be embedded for visualization.
  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset.
  """
  combined_statistics = _get_combined_statistics(
      lhs_statistics, rhs_statistics, lhs_name, rhs_name, allowlist_features,
      denylist_features)
  # Serialize and base64-encode the proto so it can be inlined into the
  # facets-overview `proto-input` attribute in the template below.
  protostr = base64.b64encode(
      combined_statistics.SerializeToString()).decode('utf-8')
  # pylint: disable=line-too-long,anomalous-backslash-in-string
  # Note that in the html template we currently assign a temporary id to the
  # facets element and then remove it once we have appended the serialized proto
  # string to the element. We do this to avoid any collision of ids when
  # displaying multiple facets output in the notebook.
  #
  # Note that a string literal including '</script>' in a <script> tag needs to
  # escape it as <\/script> to avoid early closing the wrapping <script> tag.
  html_template = """<iframe id='facets-iframe' width="100%" height="500px"></iframe>
        <script>
        facets_iframe = document.getElementById('facets-iframe');
        facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>';
        facets_iframe.srcdoc = facets_html;
        facets_iframe.id = "";
        setTimeout(() => {
          facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px')
        }, 1500)
        </script>"""
  # pylint: enable=line-too-long
  html = html_template.replace('protostr', protostr)
  return html
def visualize_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None) -> None:
  """Visualize the input statistics using Facets.
  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.
  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset.
  """
  # NOTE(review): `assert` is stripped under `python -O`, so this argument
  # check disappears in optimized runs; consider raising ValueError instead.
  assert (not allowlist_features or not denylist_features), (
      'Only specify one of allowlist_features and denylist_features.')
  html = get_statistics_html(lhs_statistics, rhs_statistics, lhs_name, rhs_name,
                             allowlist_features, denylist_features)
  display(HTML(html))
def compare_slices(statistics: statistics_pb2.DatasetFeatureStatisticsList,
                   lhs_slice_key: Text, rhs_slice_key: Text):
  """Compare the statistics of two slices side by side using Facets.
  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
    lhs_slice_key: Slice key of the first slice.
    rhs_slice_key: Slice key of the second slice.
  Raises:
    ValueError: If the input statistics proto does not have the specified slice
      statistics.
  """
  # Look up each slice's statistics (lhs first, matching the original order).
  lhs_stats, rhs_stats = (
      stats_util.get_slice_stats(statistics, key)
      for key in (lhs_slice_key, rhs_slice_key))
  visualize_statistics(lhs_stats, rhs_stats,
                       lhs_name=lhs_slice_key, rhs_name=rhs_slice_key)
| 40.913043 | 306 | 0.702046 |
7954be49be8fcebfe2882a5a0529d3f5edfc90b7 | 10,780 | py | Python | ryu_enhancements/utils/bgp.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 8 | 2021-08-25T01:08:09.000Z | 2022-01-18T12:44:41.000Z | ryu_enhancements/utils/bgp.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | null | null | null | ryu_enhancements/utils/bgp.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 1 | 2022-03-13T22:36:18.000Z | 2022-03-13T22:36:18.000Z | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities related to bgp data types and models.
"""
import logging
import netaddr
from ryu.lib import ip
from ryu.lib.packet.bgp import (
BGPUpdate,
RF_IPv4_UC,
RF_IPv6_UC,
RF_IPv4_VPN,
RF_IPv6_VPN,
RF_L2_EVPN,
RF_IPv4_FLOWSPEC,
RF_IPv6_FLOWSPEC,
RF_VPNv4_FLOWSPEC,
RF_VPNv6_FLOWSPEC,
RF_L2VPN_FLOWSPEC,
RF_RTC_UC,
RouteTargetMembershipNLRI,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGPPathAttributeMultiExitDisc,
BGPPathAttributeMpUnreachNLRI,
BGPPathAttributeAs4Path,
BGPPathAttributeAs4Aggregator,
BGPPathAttributeUnknown,
BGP_ATTR_FLAG_OPTIONAL,
BGP_ATTR_FLAG_TRANSITIVE,
BGPTwoOctetAsSpecificExtendedCommunity,
BGPIPv4AddressSpecificExtendedCommunity,
BGPFourOctetAsSpecificExtendedCommunity,
BGPFlowSpecTrafficRateCommunity,
BGPFlowSpecTrafficActionCommunity,
BGPFlowSpecRedirectCommunity,
BGPFlowSpecTrafficMarkingCommunity,
BGPFlowSpecVlanActionCommunity,
BGPFlowSpecTPIDActionCommunity,
)
from ryu.services.protocols.bgp.info_base.rtc import RtcPath
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
from ryu.services.protocols.bgp.info_base.ipv4fs import IPv4FlowSpecPath
from ryu.services.protocols.bgp.info_base.ipv6fs import IPv6FlowSpecPath
from ryu.services.protocols.bgp.info_base.vpnv4fs import VPNv4FlowSpecPath
from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecPath
from ryu.services.protocols.bgp.info_base.l2vpnfs import L2VPNFlowSpecPath
LOG = logging.getLogger('utils.bgp')
# RouteFamily to path sub-class mapping.
_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_UC: Ipv4Path,
RF_IPv6_UC: Ipv6Path,
RF_IPv4_VPN: Vpnv4Path,
RF_IPv6_VPN: Vpnv6Path,
RF_L2_EVPN: EvpnPath,
RF_IPv4_FLOWSPEC: IPv4FlowSpecPath,
RF_IPv6_FLOWSPEC: IPv6FlowSpecPath,
RF_VPNv4_FLOWSPEC: VPNv4FlowSpecPath,
RF_VPNv6_FLOWSPEC: VPNv6FlowSpecPath,
RF_L2VPN_FLOWSPEC: L2VPNFlowSpecPath,
RF_RTC_UC: RtcPath}
def create_path(src_peer, nlri, **kwargs):
    """Build a Path instance of the class matching `nlri`'s route family.

    Extra keyword arguments are forwarded to the path constructor.
    """
    family = nlri.ROUTE_FAMILY
    assert family in _ROUTE_FAMILY_TO_PATH_MAP.keys()
    path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(family)
    return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
def clone_path_and_update_med_for_target_neighbor(path, med):
assert path # and med # JvB removed, can be None
route_family = path.route_family
if route_family not in _ROUTE_FAMILY_TO_PATH_MAP.keys():
raise ValueError('Clone is not supported for address-family %s' %
route_family)
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
pattrs = path.pathattr_map
if med: # JvB added
pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = BGPPathAttributeMultiExitDisc(med)
return path_cls(
path.source, path.nlri, path.source_version_num,
pattrs=pattrs, nexthop=path.nexthop,
is_withdraw=path.is_withdraw,
med_set_by_target_neighbor=(med is not None) # JvB conditional
)
def clone_rtcpath_update_rt_as(path, new_rt_as):
    """Return a copy of the given RT NLRI path whose NLRI carries `new_rt_as`.

    Args:
        path: (Path) RT_NLRI path to clone.
        new_rt_as: AS number placed in the cloned path's RT NLRI.

    Raises:
        ValueError: If `path` is not an RT_NLRI path.
    """
    assert path and new_rt_as
    if not path or path.route_family != RF_RTC_UC:
        raise ValueError('Expected RT_NLRI path')
    cloned_nlri = RouteTargetMembershipNLRI(new_rt_as, path.nlri.route_target)
    return RtcPath(path.source, cloned_nlri, path.source_version_num,
                   pattrs=path.pathattr_map, nexthop=path.nexthop,
                   is_withdraw=path.is_withdraw)
def from_inet_ptoi(bgp_id):
    """Convert an IPv4 dotted-quad string to its integer value.

    Returns None (after a debug log entry) when `bgp_id` is not a valid
    IPv4 address string.
    """
    try:
        return ip.ipv4_to_int(bgp_id)
    except ValueError:
        LOG.debug('Invalid bgp id given for conversion to integer value %s',
                  bgp_id)
        return None
def get_unknown_opttrans_attr(path):
    """Collect `path`'s unknown and unsupported optional-transitive attributes.

    Returns:
        dict mapping attribute type code to the attribute object.
    """
    collected = {}
    for attr in path.pathattr_map.values():
        unknown_opt_trans = (
            isinstance(attr, BGPPathAttributeUnknown) and
            attr.flags & (BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANSITIVE))
        if (unknown_opt_trans or
                isinstance(attr, (BGPPathAttributeAs4Path,
                                  BGPPathAttributeAs4Aggregator))):
            collected[attr.type] = attr
    return collected
def create_end_of_rib_update():
    """Build a BGP Update message usable as an End-of-RIB (EOR) marker.

    The marker carries only an empty MP_UNREACH_NLRI attribute for the
    VPNv4 address family.
    """
    empty_mp_unreach = BGPPathAttributeMpUnreachNLRI(
        RF_IPv4_VPN.afi, RF_IPv4_VPN.safi, [])
    return BGPUpdate(path_attributes=[empty_mp_unreach])
# BGP Update message instance that can be used as an End-of-RIB (EOR) marker.
UPDATE_EOR = create_end_of_rib_update()
def create_rt_extended_community(value, subtype=2):
    """
    Create a Route Target (subtype=2) or Route Origin (subtype=3) extended
    community from its "global_admin:local_admin" string form. The concrete
    class depends on the global administrator: 2-octet AS, 4-octet AS, or
    IPv4-address specific.
    :param value: String of Route Target or Route Origin value.
    :param subtype: Subtype of Extended Community.
    :return: An instance of Route Target or Route Origin Community.
    """
    global_admin, local_admin = value.split(':')
    local_admin = int(local_admin)
    is_numeric = global_admin.isdigit()
    if is_numeric and 0 <= int(global_admin) <= 0xffff:
        return BGPTwoOctetAsSpecificExtendedCommunity(
            subtype=subtype,
            as_number=int(global_admin),
            local_administrator=local_admin)
    if is_numeric and 0xffff < int(global_admin) <= 0xffffffff:
        return BGPFourOctetAsSpecificExtendedCommunity(
            subtype=subtype,
            as_number=int(global_admin),
            local_administrator=local_admin)
    if ip.valid_ipv4(global_admin):
        return BGPIPv4AddressSpecificExtendedCommunity(
            subtype=subtype,
            ipv4_address=global_admin,
            local_administrator=local_admin)
    raise ValueError(
        'Invalid Route Target or Route Origin value: %s' % value)
def create_v4flowspec_actions(actions=None):
    """
    Build extended-community actions for IPv4 / VPNv4 Flow Specification.
    `actions` maps each Flow Specification action name to the keyword
    arguments for the matching extended-community class.
    Returns a list of extended community values.
    """
    from ryu.services.protocols.bgp.api.prefix import (
        FLOWSPEC_ACTION_TRAFFIC_RATE,
        FLOWSPEC_ACTION_TRAFFIC_ACTION,
        FLOWSPEC_ACTION_REDIRECT,
        FLOWSPEC_ACTION_TRAFFIC_MARKING,
    )
    # Action types supported for IPv4 and VPNv4.
    supported_actions = {
        FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
        FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
        FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
        FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
    }
    return _create_actions(actions, supported_actions)
def create_v6flowspec_actions(actions=None):
    """
    Build extended-community actions for IPv6 / VPNv6 Flow Specification.
    "FLOWSPEC_ACTION_REDIRECT_IPV6" is not implemented yet.
    """
    from ryu.services.protocols.bgp.api.prefix import (
        FLOWSPEC_ACTION_TRAFFIC_RATE,
        FLOWSPEC_ACTION_TRAFFIC_ACTION,
        FLOWSPEC_ACTION_REDIRECT,
        FLOWSPEC_ACTION_TRAFFIC_MARKING,
    )
    # Action types supported for IPv6 and VPNv6.
    supported_actions = {
        FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
        FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
        FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
        FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
    }
    return _create_actions(actions, supported_actions)
def create_l2vpnflowspec_actions(actions=None):
    """
    Build extended-community actions for L2VPN Flow Specification,
    including the L2-specific VLAN and TPID actions.
    """
    from ryu.services.protocols.bgp.api.prefix import (
        FLOWSPEC_ACTION_TRAFFIC_RATE,
        FLOWSPEC_ACTION_TRAFFIC_ACTION,
        FLOWSPEC_ACTION_REDIRECT,
        FLOWSPEC_ACTION_TRAFFIC_MARKING,
        FLOWSPEC_ACTION_VLAN,
        FLOWSPEC_ACTION_TPID,
    )
    # Action types supported for L2VPN.
    supported_actions = {
        FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity,
        FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity,
        FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity,
        FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity,
        FLOWSPEC_ACTION_VLAN: BGPFlowSpecVlanActionCommunity,
        FLOWSPEC_ACTION_TPID: BGPFlowSpecTPIDActionCommunity,
    }
    return _create_actions(actions, supported_actions)
def _create_actions(actions, action_types):
communities = []
if actions is None:
return communities
for name, action in actions.items():
cls_ = action_types.get(name, None)
if cls_:
communities.append(cls_(**action))
else:
raise ValueError(
'Unsupported flowspec action %s' % name)
return communities
| 36.053512 | 81 | 0.707978 |
7954bec750e8ec5ae25f7e73efe764ad8fde3b64 | 858 | py | Python | data_utils/pyrnashapes_bp.py | lygztq/ml-proj2 | e0c61ae58f61fa04d264008f35fceaac5999708b | [
"WTFPL"
] | 2 | 2018-06-13T06:51:42.000Z | 2018-07-01T07:09:28.000Z | data_utils/pyrnashapes_bp.py | lygztq/ml-proj2 | e0c61ae58f61fa04d264008f35fceaac5999708b | [
"WTFPL"
] | 2 | 2018-06-11T09:08:23.000Z | 2018-07-10T01:53:23.000Z | data_utils/pyrnashapes_bp.py | lygztq/ml-proj2 | e0c61ae58f61fa04d264008f35fceaac5999708b | [
"WTFPL"
] | null | null | null | import subprocess
# from data_utils.RNA_process import shape2matrix
from RNA_process import shape2matrix
def rnashapes(rna):
    """Run the bundled rnashapes binary on `rna` and return its output
    following the '$' marker produced by the -O 'D{$%s}' format string."""
    proc = subprocess.run(
        ['./data_utils/rnashapes', '-O', 'D{$%s}', rna],
        stdout=subprocess.PIPE)
    proc.check_returncode()
    text = proc.stdout.decode('utf8')
    # result = shape2matrix(...) conversion is disabled in the original too.
    return text[text.index('$') + 1:]
def _test():
    # Smoke test: run rnashapes on the first 100 characters of a sample
    # (lowercased) sequence and print the output plus the full input.
    a = 'GACAGAGTGAGACCCTATCTCAAAAAACAAACAAAAAAGAGTTCTGTTTGGGCATGAAAGTGTTTAATGTTTATTGGACATTTGGGTGGAGATGTCGAGTAGGCAGTTGGAAATTCAAGTCTGTAGCTTAGGGATGAGAGCAGGTGGTCTCTCAGCAGTCATTGGCAATACATGATTCGTGGGACTGCAAGAGCTCATCACGGGAGAGAATATAAAGAAGCCCTTTCCCCTCATTCTGTAGATGAGGGAACTATGCTCAGAATAGACGTATCTGTTTTCACAGTGAATCAGCAATAGAAT'
    a = a.lower()
    b = rnashapes(a[:100])
    print(b)
    print(len(b))
    print(a)
if __name__ == '__main__':
    _test()
| 35.75 | 310 | 0.763403 |
7954bf29a9e2a560d969402c739688ead6e7bb68 | 685 | py | Python | PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/index_func.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/index_func.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/index_func.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_index_func'
def _f( function ):
    # Decorator used below: hand the stub to the platform layer, which
    # creates the actual GL entry point for the GL_EXT_index_func extension.
    return _p.createFunction( function,_p.GL,'GL_EXT_index_func',False)
_p.unpack_constants( """GL_INDEX_TEST_EXT 0x81B5
GL_INDEX_TEST_FUNC_EXT 0x81B6
GL_INDEX_TEST_REF_EXT 0x81B7""", globals())
# glIndexFuncEXT(func, ref) -> None: signature declared via _p.types
# (GLenum, GLclampf); the _f decorator replaces this stub body with the
# platform-created native entry point. Autogenerated -- do not edit.
@_f
@_p.types(None,_cs.GLenum,_cs.GLclampf)
def glIndexFuncEXT( func,ref ):pass
def glInitIndexFuncEXT():
    '''Return boolean indicating whether this extension is available'''
    # Imported locally at call time (file is autogenerated; do not edit).
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| 34.25 | 71 | 0.786861 |
7954bf59a0755d132e9ab5a4f33593b474c0b330 | 2,887 | py | Python | shreder/__main__.py | mkdirlove/Shreder | 785eab317d5d07cc468585ea97a5b0bef74ccddc | [
"MIT"
] | 2 | 2021-07-28T13:21:49.000Z | 2021-09-07T12:36:44.000Z | shreder/__main__.py | mkdirlove/Shreder | 785eab317d5d07cc468585ea97a5b0bef74ccddc | [
"MIT"
] | null | null | null | shreder/__main__.py | mkdirlove/Shreder | 785eab317d5d07cc468585ea97a5b0bef74ccddc | [
"MIT"
] | 1 | 2021-09-22T10:33:56.000Z | 2021-09-22T10:33:56.000Z | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import paramiko
from time import sleep as ssh_delay
from threading import Thread as ssh_thread
from .badges import Badges
class Shreder(Badges):
    """Threaded SSH password-guessing client.

    NOTE(review): offensive-security tooling -- use only against systems
    you are explicitly authorized to test.
    """
    # Set by connect() when a login succeeds; read by brute() to stop early.
    password = None
    # Pause (seconds) between starting worker threads. This class attribute
    # shares its name with the module-level `ssh_delay` (time.sleep) import;
    # `ssh_delay(self.ssh_delay)` below therefore calls time.sleep(0.1).
    ssh_delay = 0.1
    def connect(self, host, port, username, password):
        # Attempt a single SSH login; on success record the password on the
        # instance. All exceptions (auth failure, network errors, ...) are
        # swallowed so that worker threads never crash the run.
        ssh = paramiko.client.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            ssh.connect(host, port=int(port), username=username, password=password)
            self.password = password
        except Exception:
            return
        ssh.close()
    def brute(self, host, port, username, dictionary):
        # Try every non-blank line of the wordlist file `dictionary` against
        # host:port, one thread per candidate, returning the first password
        # that worked (or None).
        with open(dictionary, 'r') as f:
            threads = list()
            lines = f.read().split('\n')
            for password in lines:
                if password.strip():
                    threads.append(
                        ssh_thread(
                            target=self.connect,
                            args=[host, port, username, password]
                        )
                    )
            line = "/-\|"  # spinner glyphs for the progress display
            counter = 0
            tried = 1
            for thread in threads:
                # Stop launching new attempts once any worker found a match.
                if not self.password:
                    if counter >= len(line):
                        counter = 0
                    # print_process/print_empty presumably come from Badges
                    # (see import above) -- confirm against that module.
                    self.print_process(
                        f"Processing... {line[counter]} | Passwords tried: {tried}/{str(len(threads))}", end=''
                    )
                    ssh_delay(self.ssh_delay)
                    thread.start()
                    counter += 1
                    tried += 1
            self.print_empty(end='')
            # Wait for any still-running workers before reporting the result.
            for thread in threads:
                if thread.is_alive():
                    thread.join()
            return self.password
| 33.183908 | 111 | 0.592657 |
7954bf959964ace9bd15d258958da924b7f659c7 | 78,499 | py | Python | NN_layers/SEResNeXt50.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | [
"BSD-3-Clause"
] | 34 | 2020-08-11T14:38:29.000Z | 2022-03-30T10:32:34.000Z | NN_layers/SEResNeXt50.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | [
"BSD-3-Clause"
] | 9 | 2020-11-16T19:19:48.000Z | 2022-03-31T18:29:24.000Z | NN_layers/SEResNeXt50.py | Mxbonn/zigzag_fork | 250ee5e22904ba846dfb106983d46b83bd9ee230 | [
"BSD-3-Clause"
] | 18 | 2020-08-24T07:26:51.000Z | 2022-01-06T00:59:11.000Z | layer_info = \
{1: {'B': 1, 'K': 64, 'C': 3, 'OY': 112, 'OX': 112, 'FY': 7, 'FX': 7, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
2: {'B': 1, 'K': 128, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
3: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
4: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
5: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
6: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
7: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
8: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
9: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
10: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
11: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
12: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
13: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
14: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
15: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
16: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
17: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
18: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
19: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
20: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
21: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
22: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
23: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
24: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
25: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
26: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
27: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
28: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
29: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
30: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
31: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
32: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
33: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
34: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
35: {'B': 1, 'K': 256, 'C': 128, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
36: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
37: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
38: {'B': 1, 'K': 256, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
39: {'B': 1, 'K': 128, 'C': 256, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
40: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
41: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
42: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
43: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
44: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
45: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
46: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
47: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
48: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
49: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
50: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
51: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
52: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
53: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
54: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
55: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
56: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
57: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
58: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
59: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
60: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
61: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
62: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
63: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
64: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
65: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
66: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
67: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
68: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
69: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
70: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
71: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
72: {'B': 1, 'K': 256, 'C': 128, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
73: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
74: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
75: {'B': 1, 'K': 128, 'C': 256, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
76: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
77: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
78: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
79: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
80: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
81: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
82: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
83: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
84: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
85: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
86: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
87: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
88: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
89: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
90: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
91: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
92: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
93: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
94: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
95: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
96: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
97: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
98: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
99: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
100: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
101: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
102: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
103: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
104: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
105: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
106: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
107: {'B': 1, 'K': 4, 'C': 4, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
108: {'B': 1, 'K': 256, 'C': 128, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
109: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
110: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
111: {'B': 1, 'K': 256, 'C': 256, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
112: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
113: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
114: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
115: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
116: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
117: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
118: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
119: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
120: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
121: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
122: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
123: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
124: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
125: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
126: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
127: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
128: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
129: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
130: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
131: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
132: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
133: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
134: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
135: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
136: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
137: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
138: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
139: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
140: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
141: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
142: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
143: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
144: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
145: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
146: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
147: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
148: {'B': 1, 'K': 256, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
149: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
150: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
151: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
152: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
153: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
154: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
155: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
156: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
157: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
158: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
159: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
160: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
161: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
162: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
163: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
164: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
165: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
166: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
167: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
168: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
169: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
170: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
171: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
172: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
173: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
174: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
175: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
176: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
177: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
178: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
179: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
180: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
181: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
182: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
183: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
184: {'B': 1, 'K': 256, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
185: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
186: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
187: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
188: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
189: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
190: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
191: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
192: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
193: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
194: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
195: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
196: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
197: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
198: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
199: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
200: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
201: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
202: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
203: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
204: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
205: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
206: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
207: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
208: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
209: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
210: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
211: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
212: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
213: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
214: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
215: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
216: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
217: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
218: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
219: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
220: {'B': 1, 'K': 256, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
221: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
222: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
223: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
224: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
225: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
226: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
227: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
228: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
229: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
230: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
231: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
232: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
233: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
234: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
235: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
236: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
237: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
238: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
239: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
240: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
241: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
242: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
243: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
244: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
245: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
246: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
247: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
248: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
249: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
250: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
251: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
252: {'B': 1, 'K': 8, 'C': 8, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
253: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
254: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
255: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
256: {'B': 1, 'K': 512, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
257: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
258: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
259: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
260: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
261: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
262: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
263: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
264: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
265: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
266: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
267: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
268: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
269: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
270: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
271: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
272: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
273: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
274: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
275: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
276: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
277: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
278: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
279: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
280: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
281: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
282: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
283: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
284: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
285: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
286: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
287: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
288: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
289: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
290: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
291: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
292: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
293: {'B': 1, 'K': 512, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
294: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
295: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
296: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
297: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
298: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
299: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
300: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
301: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
302: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
303: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
304: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
305: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
306: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
307: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
308: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
309: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
310: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
311: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
312: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
313: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
314: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
315: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
316: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
317: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
318: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
319: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
320: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
321: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
322: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
323: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
324: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
325: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
326: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
327: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
328: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
329: {'B': 1, 'K': 512, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
330: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
331: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
332: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
333: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
334: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
335: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
336: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
337: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
338: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
339: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
340: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
341: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
342: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
343: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
344: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
345: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
346: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
347: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
348: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
349: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
350: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
351: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
352: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
353: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
354: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
355: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
356: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
357: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
358: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
359: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
360: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
361: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
362: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
363: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
364: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
365: {'B': 1, 'K': 512, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
366: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
367: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
368: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
369: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
370: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
371: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
372: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
373: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
374: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
375: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
376: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
377: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
378: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
379: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
380: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
381: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
382: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
383: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
384: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
385: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
386: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
387: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
388: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
389: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
390: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
391: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
392: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
393: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
394: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
395: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
396: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
397: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
398: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
399: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
400: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
401: {'B': 1, 'K': 512, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
402: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
403: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
404: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
405: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
406: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
407: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
408: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
409: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
410: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
411: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
412: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
413: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
414: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
415: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
416: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
417: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
418: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
419: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
420: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
421: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
422: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
423: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
424: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
425: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
426: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
427: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
428: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
429: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
430: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
431: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
432: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
433: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
434: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
435: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
436: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
437: {'B': 1, 'K': 512, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
438: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
439: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
440: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
441: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
442: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
443: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
444: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
445: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
446: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
447: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
448: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
449: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
450: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
451: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
452: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
453: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
454: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
455: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
456: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
457: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
458: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
459: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
460: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
461: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
462: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
463: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
464: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
465: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
466: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
467: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
468: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
469: {'B': 1, 'K': 16, 'C': 16, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
470: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
471: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
472: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
473: {'B': 1, 'K': 1024, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
474: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
475: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
476: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
477: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
478: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
479: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
480: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
481: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
482: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
483: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
484: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
485: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
486: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
487: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
488: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
489: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
490: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
491: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
492: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
493: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
494: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
495: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
496: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
497: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
498: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
499: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
500: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
501: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
502: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
503: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
504: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
505: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
506: {'B': 1, 'K': 2048, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
507: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
508: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
509: {'B': 1, 'K': 2048, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
510: {'B': 1, 'K': 1024, 'C': 2048, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
511: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
512: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
513: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
514: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
515: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
516: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
517: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
518: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
519: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
520: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
521: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
522: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
523: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
524: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
525: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
526: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
527: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
528: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
529: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
530: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
531: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
532: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
533: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
534: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
535: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
536: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
537: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
538: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
539: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
540: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
541: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
542: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
543: {'B': 1, 'K': 2048, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
544: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
545: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
546: {'B': 1, 'K': 1024, 'C': 2048, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
547: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
548: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
549: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
550: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
551: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
552: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
553: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
554: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
555: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
556: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
557: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
558: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
559: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
560: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
561: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
562: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
563: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
564: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
565: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
566: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
567: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
568: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
569: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
570: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
571: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
572: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
573: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
574: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
575: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
576: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
577: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
578: {'B': 1, 'K': 32, 'C': 32, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
579: {'B': 1, 'K': 2048, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
580: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
581: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
582: {'B': 1, 'K': 1000, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1}}
| 134.416096 | 139 | 0.340068 |
7954bf98a489379c82cbc06f1630108eba1dd616 | 3,089 | py | Python | analysis/rank/rank_small_barriers.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | analysis/rank/rank_small_barriers.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | analysis/rank/rank_small_barriers.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from time import time
import warnings
import pandas as pd
from analysis.rank.lib.networks import get_network_results
from analysis.rank.lib.metrics import (
classify_streamorder,
classify_spps,
classify_percent_altered,
)
from api.constants import SB_API_FIELDS
warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*")
start = time()
data_dir = Path("data")
barriers_dir = data_dir / "barriers/master"
api_dir = data_dir / "api"
results_dir = data_dir / "barriers/networks"
if not os.path.exists(api_dir):
os.makedirs(api_dir)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
### Read in master
print("Reading master...")
df = (
pd.read_feather(barriers_dir / "small_barriers.feather")
.set_index("id")
.drop(
columns=[
"geometry",
"level_0",
"index",
"dup_group",
"dup_count",
"dup_log",
"snap_dist",
"snap_tolerance",
"snap_ref_id",
"snap_log",
"snapped",
"log",
"lineID",
"wbID",
],
errors="ignore",
)
.rename(columns={"excluded": "Excluded", "intermittent": "Intermittent",})
)
# Drop any that are duplicates
# NOTE: we retain those that were dropped because these are relevant for folks to know what
# has been inventoried (e.g., those dropped because no barrier, etc)
# but do drop any that have no state or HUC2
df = df.loc[(~df.duplicate) & (df.State)].copy()
### Classify StreamOrder
df["StreamOrderClass"] = classify_streamorder(df.StreamOrder)
for col in ["TESpp", "StateSGCNSpp", "RegionalSGCNSpp"]:
df[f"{col}Class"] = classify_spps(df[col])
### Get network results
networks = get_network_results(df, "small_barriers")
df = df.join(networks)
# True if the barrier was snapped to a network and has network results in the
# all networks scenario
df["HasNetwork"] = df.index.isin(networks.index)
df["Ranked"] = df.HasNetwork & (~df.unranked)
# Intermittent is not applicable if it doesn't have a network
df["Intermittent"] = df["Intermittent"].astype("int8")
df.loc[~df.HasNetwork, "Intermittent"] = -1
### Classify PercentAltered
df["PercentAltered"] = -1
df.loc[df.HasNetwork, "PercentAltered"] = 100 - df.loc[df.HasNetwork].PercentUnaltered
df["PercentAlteredClass"] = classify_percent_altered(df.PercentAltered)
# fill network columns and set proper type
for col in networks.columns:
df[col] = df[col].fillna(-1).astype(networks[col].dtype)
### Sanity check
if df.groupby(level=0).size().max() > 1:
raise ValueError(
"Error - there are duplicate barriers in the results for small_barriers. Check uniqueness of IDs and joins."
)
### Write out data for API
print(f"Writing to output files...")
# Full results for tiles, etc
df.reset_index().to_feather(results_dir / "small_barriers.feather")
# save for API
df[df.columns.intersection(SB_API_FIELDS)].reset_index().to_feather(
api_dir / f"small_barriers.feather"
)
| 26.401709 | 117 | 0.679508 |
7954bfa8109e3b1811a508c979c711c085866c19 | 4,174 | py | Python | accounts/views.py | AlenKrga1/FreelanceSolution | 0a0d656bd90a20c6250bcc889b35cea2fb945427 | [
"BSD-Source-Code"
] | null | null | null | accounts/views.py | AlenKrga1/FreelanceSolution | 0a0d656bd90a20c6250bcc889b35cea2fb945427 | [
"BSD-Source-Code"
] | 7 | 2021-03-30T14:04:17.000Z | 2022-03-12T00:44:39.000Z | accounts/views.py | AlenKrga1/FreelanceSolution | 0a0d656bd90a20c6250bcc889b35cea2fb945427 | [
"BSD-Source-Code"
] | 2 | 2020-07-30T21:55:06.000Z | 2020-10-27T20:53:56.000Z | from django.contrib.auth.mixins import UserPassesTestMixin, AccessMixin
from .forms import UserSignInForm, UserRegisterForm, ContactMeForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.core.mail import send_mail
from products.models import UserProduct
from django.views.generic import View
from django.http import HttpResponse
from django.urls import reverse
from orders.models import Order
class ContactMe(View):
def post(self, request):
form = ContactMeForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
message = form.cleaned_data['message']
# Send an email to typed in address
send_mail(
f'You sent a message to Alen Krga',
f'Thanks for reaching out! We will be in touch soon.\n\nMessage: "{message}"',
'alen.krga1@gmail.com',
[email],
fail_silently=True,
)
# Send an email to admin to notify him of a new message
send_mail(
f'You have a new message from {email}',
f'Message: {message}',
'alen.krga1@gmail.com',
['alen.krga1@gmail.com'],
fail_silently=True,
)
form.save()
messages.info(request, "Message sent!")
return redirect(reverse('index'))
messages.error(request, "Invalid form")
return redirect(reverse('contact-me'))
def get(self, request):
form = ContactMeForm()
return render(request, 'contact_me.html', {'form': form})
@login_required
def profile(request):
# Get products that the user owns and his custom orders
user_products = UserProduct.objects.filter(user = request.user)
orders = Order.objects.filter(user = request.user)
return render(request, 'profile.html', {'user_products': user_products, 'orders': orders})
@login_required
def logout(request):
auth.logout(request)
return redirect(reverse('index'))
# These Mixins are used to make sure authenticated users can't access these urls
class SignIn(UserPassesTestMixin, AccessMixin, View):
def test_func(self):
return self.request.user.is_anonymous
def handle_no_permission(self):
return redirect(reverse('profile'))
def get(self, request):
form = UserSignInForm()
return render(request, 'signin.html', {'form': form, 'next': request.GET.get('next', '')})
def post(self, request):
form = UserSignInForm(request.POST)
if form.is_valid():
user = auth.authenticate(
form.cleaned_data['username_or_email'],
password = form.cleaned_data['password']
)
if user:
auth.login(request, user)
# messages.success(request, "Sign in successful")
if request.GET.get('next', '') != '':
return redirect(request.GET.get('next'))
else:
return redirect(reverse('index'))
else:
form.add_error(None, "Your username or password are incorrect")
return render(request, 'signin.html', {'form': form, 'next': request.GET.get('next', '')})
else:
form.add_error(None, "invalid Form")
return render(request, 'signin.html', {'form': form, 'next': request.GET.get('next', '')})
# These Mixins are used to make sure authenticated users can't access these urls
class Register(UserPassesTestMixin, AccessMixin, View):
def test_func(self):
return self.request.user.is_anonymous
def handle_no_permission(self):
return redirect(reverse('profile'))
def get(self, request):
form = UserRegisterForm()
return render(request, 'register.html', {'form': form, 'next': request.GET.get('next', '')})
def post(self, request):
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
user = auth.authenticate(
form.cleaned_data['username'],
password = form.cleaned_data['password1']
)
if user:
auth.login(request, user)
# messages.success(request, "You have successfully registered")
return redirect(reverse('index'))
else:
form.add_error(None, "Your username or password are incorrect")
return render(request, 'register.html', {'form': form, 'next': request.GET.get('next', '')})
else:
form.add_error(None, "invalid Form")
return render(request, 'register.html', {'form': form, 'next': request.GET.get('next', '')}) | 27.281046 | 96 | 0.705798 |
7954bfddc76594ba909220df8c36c99ef673b97a | 623 | py | Python | manage.py | beatitud/beatitud-back | 32de6c33ec5d70e35bf76c38bedc73c5b2c3e719 | [
"MIT"
] | null | null | null | manage.py | beatitud/beatitud-back | 32de6c33ec5d70e35bf76c38bedc73c5b2c3e719 | [
"MIT"
] | null | null | null | manage.py | beatitud/beatitud-back | 32de6c33ec5d70e35bf76c38bedc73c5b2c3e719 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
# If DJANGO_SETTINGS_MODULE not given, then we take local settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'beatitud_back.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.611111 | 83 | 0.699839 |
7954c1502740dcda284eba261caaf970ae84d8e9 | 1,563 | py | Python | tests/incident_cost/test_incident_cost_service.py | ymatsiuk/dispatch | cfc0b238f980d9f8140294dd50a5527ca4e1cdb8 | [
"Apache-2.0"
] | null | null | null | tests/incident_cost/test_incident_cost_service.py | ymatsiuk/dispatch | cfc0b238f980d9f8140294dd50a5527ca4e1cdb8 | [
"Apache-2.0"
] | null | null | null | tests/incident_cost/test_incident_cost_service.py | ymatsiuk/dispatch | cfc0b238f980d9f8140294dd50a5527ca4e1cdb8 | [
"Apache-2.0"
] | null | null | null | import pytest
def test_get(session, incident_cost):
from dispatch.incident_cost.service import get
t_incident_cost = get(db_session=session, incident_cost_id=incident_cost.id)
assert t_incident_cost.id == incident_cost.id
def test_get_all(session, incident_costs):
from dispatch.incident_cost.service import get_all
t_incident_costs = get_all(db_session=session).all()
assert len(t_incident_costs) > 1
def test_create(session, incident_cost_type, project):
from dispatch.incident_cost.service import create
from dispatch.incident_cost.models import IncidentCostCreate
amount = 10000
incident_cost_in = IncidentCostCreate(
amount=amount,
incident_cost_type=incident_cost_type,
project=project,
)
incident_cost = create(db_session=session, incident_cost_in=incident_cost_in)
assert incident_cost
@pytest.mark.skip
def test_update(session, incident_cost):
from dispatch.incident_cost.service import update
from dispatch.incident_cost.models import IncidentCostUpdate
amount = 10001
incident_cost_in = IncidentCostUpdate(
amount=amount,
)
incident_cost = update(
db_session=session, incident_cost=incident_cost, incident_cost_in=incident_cost_in
)
assert incident_cost.amount == amount
def test_delete(session, incident_cost):
from dispatch.incident_cost.service import delete, get
delete(db_session=session, incident_cost_id=incident_cost.id)
assert not get(db_session=session, incident_cost_id=incident_cost.id)
| 28.944444 | 90 | 0.768394 |
7954c1dc08de57562893ae47aa2a48d221e16793 | 1,090 | py | Python | itests/test_blocking.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | itests/test_blocking.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | itests/test_blocking.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z | import unittest
from slixmpp import JID
from slixmpp.test.integration import SlixIntegration
class TestBlocking(SlixIntegration):
async def asyncSetUp(self):
await super().asyncSetUp()
self.add_client(
self.envjid('CI_ACCOUNT1'),
self.envstr('CI_ACCOUNT1_PASSWORD'),
)
self.register_plugins(['xep_0191'])
await self.connect_clients()
async def test_blocking(self):
"""Check we can block, unblock, and list blocked"""
await self.clients[0]['xep_0191'].block(
[JID('toto@example.com'), JID('titi@example.com')]
)
blocked = {JID('toto@example.com'), JID('titi@example.com')}
iq = await self.clients[0]['xep_0191'].get_blocked()
self.assertEqual(iq['blocklist']['items'], blocked)
info = await self.clients[0]['xep_0191'].unblock(
blocked,
)
iq = await self.clients[0]['xep_0191'].get_blocked()
self.assertEqual(len(iq['blocklist']['items']), 0)
suite = unittest.TestLoader().loadTestsFromTestCase(TestBlocking)
| 33.030303 | 68 | 0.629358 |
7954c3fb7fb0de00bc46aec0d1c40b206a93d411 | 44,755 | py | Python | bin/ADFRsuite/lib/python2.7/site-packages/Bio/SeqIO/_index.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | Bio/SeqIO/_index.py | OpenSourceCancer/biopython | 5245222e4de976bbfeb17e72ea0cde66c8da604d | [
"PostgreSQL"
] | null | null | null | Bio/SeqIO/_index.py | OpenSourceCancer/biopython | 5245222e4de976bbfeb17e72ea0cde66c8da604d | [
"PostgreSQL"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | # Copyright 2009-2011 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Dictionary like indexing of sequence files (PRIVATE).
You are not expected to access this module, or any of its code, directly. This
is all handled internally by the Bio.SeqIO.index(...) function which is the
public interface for this functionality.
The basic idea is that we scan over a sequence file, looking for new record
markers. We then try and extract the string that Bio.SeqIO.parse/read would
use as the record id, ideally without actually parsing the full record. We
then use a subclassed Python dictionary to record the file offset for the
record start against the record id.
Note that this means full parsing is on demand, so any invalid or problem
record may not trigger an exception until it is accessed. This is by design.
This means our dictionary like objects have in memory ALL the keys (all the
record identifiers), which shouldn't be a problem even with second generation
sequencing. If this is an issue later on, storing the keys and offsets in a
temp lookup file might be one idea (e.g. using SQLite or an OBDA style index).
"""
import os
try:
from collections import UserDict as _dict_base
except ImportError:
from UserDict import DictMixin as _dict_base
import re
import itertools
from StringIO import StringIO
try:
from sqlite3 import dbapi2 as _sqlite
from sqlite3 import IntegrityError as _IntegrityError
from sqlite3 import OperationalError as _OperationalError
except ImportError:
#Not expected to be present on Python 2.4, ignore it
#and at least offer Bio.SeqIO.index() functionality
_sqlite = None
pass
from Bio._py3k import _bytes_to_string, _as_bytes, _as_string
from Bio import SeqIO
from Bio import Alphabet
class _IndexedSeqFileDict(_dict_base):
"""Read only dictionary interface to a sequential sequence file.
Keeps the keys and associated file offsets in memory, reads the file to
access entries as SeqRecord objects using Bio.SeqIO for parsing them.
This approach is memory limited, but will work even with millions of
sequences.
Note - as with the Bio.SeqIO.to_dict() function, duplicate keys
(record identifiers by default) are not allowed. If this happens,
a ValueError exception is raised.
By default the SeqRecord's id string is used as the dictionary
key. This can be changed by suppling an optional key_function,
a callback function which will be given the record id and must
return the desired key. For example, this allows you to parse
NCBI style FASTA identifiers, and extract the GI number to use
as the dictionary key.
Note that this dictionary is essentially read only. You cannot
add or change values, pop values, nor clear the dictionary.
"""
def __init__(self, filename, format, alphabet, key_function):
#Use key_function=None for default value
try:
proxy_class = _FormatToRandomAccess[format]
except KeyError:
raise ValueError("Unsupported format '%s'" % format)
random_access_proxy = proxy_class(filename, format, alphabet)
self._proxy = random_access_proxy
self._key_function = key_function
if key_function:
offset_iter = ((key_function(k),o,l) for (k,o,l) in random_access_proxy)
else:
offset_iter = random_access_proxy
offsets = {}
for key, offset, length in offset_iter:
#Note - we don't store the length because I want to minimise the
#memory requirements. With the SQLite backend the length is kept
#and is used to speed up the get_raw method (by about 3 times).
if key in offsets:
self._proxy._handle.close()
raise ValueError("Duplicate key '%s'" % key)
else:
offsets[key] = offset
self._offsets = offsets
def __repr__(self):
return "SeqIO.index(%r, %r, alphabet=%r, key_function=%r)" \
% (self._proxy._handle.name, self._proxy._format,
self._proxy._alphabet, self._key_function)
def __str__(self):
if self:
return "{%s : SeqRecord(...), ...}" % repr(self.keys()[0])
else:
return "{}"
def __contains__(self, key) :
return key in self._offsets
def __len__(self):
"""How many records are there?"""
return len(self._offsets)
#Define version-appropriate dictionary methods at class creation time:
#on Python 2 a real dict has iteritems, so provide the iter* trio and
#list-returning keys(); on Python 3 items/values/keys are iterators.
if hasattr(dict, "iteritems"):
    #Python 2, use iteritems but not items etc
    def values(self):
        """Would be a list of the SeqRecord objects, but not implemented.

        In general you can be indexing very very large files, with millions
        of sequences. Loading all these into memory at once as SeqRecord
        objects would (probably) use up all the RAM. Therefore we simply
        don't support this dictionary method.
        """
        raise NotImplementedError("Due to memory concerns, when indexing a "
                                  "sequence file you cannot access all the "
                                  "records at once.")

    def items(self):
        """Would be a list of the (key, SeqRecord) tuples, but not implemented.

        In general you can be indexing very very large files, with millions
        of sequences. Loading all these into memory at once as SeqRecord
        objects would (probably) use up all the RAM. Therefore we simply
        don't support this dictionary method.
        """
        raise NotImplementedError("Due to memory concerns, when indexing a "
                                  "sequence file you cannot access all the "
                                  "records at once.")

    def keys(self) :
        """Return a list of all the keys (SeqRecord identifiers)."""
        #TODO - Stick a warning in here for large lists? Or just refuse?
        return self._offsets.keys()

    def itervalues(self):
        """Iterate over the SeqRecord items."""
        for key in self.__iter__():
            yield self.__getitem__(key)

    def iteritems(self):
        """Iterate over the (key, SeqRecord) items."""
        for key in self.__iter__():
            yield key, self.__getitem__(key)

    def iterkeys(self):
        """Iterate over the keys."""
        return self.__iter__()
else:
    #Python 3 - define items and values as iterators
    def items(self):
        """Iterate over the (key, SeqRecord) items."""
        for key in self.__iter__():
            yield key, self.__getitem__(key)

    def values(self):
        """Iterate over the SeqRecord items."""
        for key in self.__iter__():
            yield self.__getitem__(key)

    def keys(self):
        """Iterate over the keys."""
        return self.__iter__()
def __iter__(self):
    """Iterate over the keys."""
    return iter(self._offsets)
def __getitem__(self, key):
    """x.__getitem__(y) <==> x[y]"""
    #Pass the offset to the proxy
    record = self._proxy.get(self._offsets[key])
    #Sanity check: the record parsed at that offset must map back to the
    #same key we looked up (re-applying key_function if one was given).
    if self._key_function:
        key2 = self._key_function(record.id)
    else:
        key2 = record.id
    if key != key2:
        raise ValueError("Key did not match (%s vs %s)" % (key, key2))
    return record
def get(self, k, d=None):
    """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
    try:
        record = self.__getitem__(k)
    except KeyError:
        #Key absent - fall back on the caller supplied default.
        return d
    return record
def get_raw(self, key):
    """Similar to the get method, but returns the record as a raw string.

    If the key is not found, a KeyError exception is raised.
    Note that on Python 3 a bytes string is returned, not a typical
    unicode string.
    NOTE - This functionality is not supported for every file format.
    """
    #Look up the stored offset first, then ask the proxy for the raw text.
    offset = self._offsets[key]
    return self._proxy.get_raw(offset)
def __setitem__(self, key, value):
    """Would allow setting or replacing records, but not implemented."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file is read only.")
def update(self, *args, **kwargs):
    """Would allow adding more values, but not implemented."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file is read only.")
def pop(self, key, default=None):
    """Would remove specified record, but not implemented."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file is read only.")
def popitem(self):
    """Would remove and return a SeqRecord, but not implemented."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file is read only.")
def clear(self):
    """Would clear dictionary, but not implemented."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file is read only.")
def fromkeys(self, keys, value=None):
    """A dictionary method which we don't implement."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file doesn't "
                              "support this.")
def copy(self):
    """A dictionary method which we don't implement."""
    #Bug fix: the old message read "An indexed a sequence file..." (garbled).
    raise NotImplementedError("An indexed sequence file doesn't "
                              "support this.")
class _SQLiteManySeqFilesDict(_IndexedSeqFileDict):
    """Read only dictionary interface to many sequential sequence files.

    Keeps the keys, file-numbers and offsets in an SQLite database. To access
    a record by key, reads from the offset in the appropriate file using
    Bio.SeqIO for parsing.

    There are OS limits on the number of files that can be open at once,
    so a pool are kept. If a record is required from a closed file, then
    one of the open handles is closed first.
    """
    def __init__(self, index_filename, filenames, format, alphabet,
                 key_function, max_open=10):
        """Open an existing index database, or build a new one.

        If index_filename already exists it is validated (record count,
        format and filenames must agree) and reused; otherwise the given
        filenames are scanned and a new index is written.
        """
        random_access_proxies = {}
        #TODO? - Don't keep filename list in memory (just in DB)?
        #Should save a chunk of memory if dealing with 1000s of files.
        #Furthermore could compare a generator to the DB on reloading
        #(no need to turn it into a list)
        if not _sqlite:
            #Hack for Python 2.4 (or if Python is compiled without it)
            from Bio import MissingPythonDependencyError
            raise MissingPythonDependencyError("Requires sqlite3, which is "
                                               "included Python 2.5+")
        if filenames is not None:
            filenames = list(filenames)  #In case it was a generator
        if os.path.isfile(index_filename):
            #Reuse the index.
            con = _sqlite.connect(index_filename)
            self._con = con
            #Check the count...
            try:
                count, = con.execute("SELECT value FROM meta_data WHERE key=?;",
                                     ("count",)).fetchone()
                self._length = int(count)
                if self._length == -1:
                    con.close()
                    raise ValueError("Unfinished/partial database")
                count, = con.execute("SELECT COUNT(key) FROM offset_data;").fetchone()
                #Bug fix: the old code used the Python 2 only "<>" operator.
                if self._length != int(count):
                    con.close()
                    raise ValueError("Corrupt database? %i entries not %i"
                                     % (int(count), self._length))
                self._format, = con.execute("SELECT value FROM meta_data WHERE key=?;",
                                            ("format",)).fetchone()
                if format and format != self._format:
                    con.close()
                    raise ValueError("Index file says format %s, not %s"
                                     % (self._format, format))
                self._filenames = [row[0] for row in
                                   con.execute("SELECT name FROM file_data "
                                               "ORDER BY file_number;").fetchall()]
                if filenames and len(filenames) != len(self._filenames):
                    con.close()
                    #Bug fix: the old code interpolated a boolean comparison,
                    #len(self.filenames) != len(filenames), into a two-%i
                    #format string (and used the wrong attribute name),
                    #raising TypeError instead of the intended message.
                    raise ValueError("Index file says %i files, not %i"
                                     % (len(self._filenames), len(filenames)))
                if filenames and filenames != self._filenames:
                    con.close()
                    raise ValueError("Index file has different filenames")
            except _OperationalError as err:
                con.close()
                raise ValueError("Not a Biopython index database? %s" % err)
            #Now we have the format (from the DB if not given to us),
            try:
                proxy_class = _FormatToRandomAccess[self._format]
            except KeyError:
                con.close()
                raise ValueError("Unsupported format '%s'" % self._format)
        else:
            self._filenames = filenames
            self._format = format
            if not format or not filenames:
                raise ValueError("Filenames to index and format required")
            try:
                proxy_class = _FormatToRandomAccess[format]
            except KeyError:
                raise ValueError("Unsupported format '%s'" % format)
            #Create the index
            con = _sqlite.connect(index_filename)
            self._con = con
            #Sqlite PRAGMA settings for speed
            con.execute("PRAGMA synchronous='OFF'")
            con.execute("PRAGMA locking_mode=EXCLUSIVE")
            #Don't index the key column until the end (faster)
            #con.execute("CREATE TABLE offset_data (key TEXT PRIMARY KEY, "
            #            "offset INTEGER);")
            con.execute("CREATE TABLE meta_data (key TEXT, value TEXT);")
            #count = -1 marks the database as unfinished until we commit
            #the real total at the very end.
            con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
                        ("count", -1))
            con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
                        ("format", format))
            #TODO - Record the alphabet?
            #TODO - Record the file size and modified date?
            con.execute("CREATE TABLE file_data (file_number INTEGER, name TEXT);")
            con.execute("CREATE TABLE offset_data (key TEXT, file_number INTEGER, offset INTEGER, length INTEGER);")
            count = 0
            for i, filename in enumerate(filenames):
                con.execute("INSERT INTO file_data (file_number, name) VALUES (?,?);",
                            (i, filename))
                random_access_proxy = proxy_class(filename, format, alphabet)
                if key_function:
                    offset_iter = ((key_function(k), i, o, l)
                                   for (k, o, l) in random_access_proxy)
                else:
                    offset_iter = ((k, i, o, l)
                                   for (k, o, l) in random_access_proxy)
                while True:
                    #Insert in batches of 100 to keep memory and commit
                    #overhead down.
                    batch = list(itertools.islice(offset_iter, 100))
                    if not batch:
                        break
                    con.executemany("INSERT INTO offset_data (key,file_number,offset,length) VALUES (?,?,?,?);",
                                    batch)
                    con.commit()
                    count += len(batch)
                if len(random_access_proxies) < max_open:
                    random_access_proxies[i] = random_access_proxy
                else:
                    random_access_proxy._handle.close()
            self._length = count
            try:
                #The UNIQUE index doubles as the duplicate key check.
                con.execute("CREATE UNIQUE INDEX IF NOT EXISTS "
                            "key_index ON offset_data(key);")
            except _IntegrityError as err:
                self._proxies = random_access_proxies
                self.close()
                con.close()
                raise ValueError("Duplicate key? %s" % err)
            con.execute("PRAGMA locking_mode=NORMAL")
            con.execute("UPDATE meta_data SET value = ? WHERE key = ?;",
                        (count, "count"))
            con.commit()
        self._proxies = random_access_proxies
        self._max_open = max_open
        self._index_filename = index_filename
        self._alphabet = alphabet
        self._key_function = key_function

    def __repr__(self):
        #Mimic the SeqIO.index_db(...) call which would recreate this object.
        return "SeqIO.index_db(%r, filenames=%r, format=%r, alphabet=%r, key_function=%r)" \
            % (self._index_filename, self._filenames, self._format,
               self._alphabet, self._key_function)

    def __contains__(self, key):
        return bool(self._con.execute("SELECT key FROM offset_data WHERE key=?;",
                                      (key,)).fetchone())

    def __len__(self):
        """How many records are there?"""
        #Cached at load/build time; avoids a COUNT(key) query per call.
        return self._length
        #return self._con.execute("SELECT COUNT(key) FROM offset_data;").fetchone()[0]

    def __iter__(self):
        """Iterate over the keys."""
        for row in self._con.execute("SELECT key FROM offset_data;"):
            yield str(row[0])

    if hasattr(dict, "iteritems"):
        #Python 2, use iteritems but not items etc
        #Just need to override this...
        def keys(self):
            """Return a list of all the keys (SeqRecord identifiers)."""
            return [str(row[0]) for row in
                    self._con.execute("SELECT key FROM offset_data;").fetchall()]

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        #Pass the offset to the proxy
        row = self._con.execute("SELECT file_number, offset FROM offset_data WHERE key=?;",
                                (key,)).fetchone()
        if not row:
            raise KeyError
        file_number, offset = row
        proxies = self._proxies
        if file_number in proxies:
            record = proxies[file_number].get(offset)
        else:
            if len(proxies) >= self._max_open:
                #Close an old handle...
                proxies.popitem()[1]._handle.close()
            #Open a new handle...
            proxy = _FormatToRandomAccess[self._format](
                self._filenames[file_number],
                self._format, self._alphabet)
            record = proxy.get(offset)
            proxies[file_number] = proxy
        #Sanity check - the parsed record must map back to the looked up key.
        if self._key_function:
            key2 = self._key_function(record.id)
        else:
            key2 = record.id
        if key != key2:
            raise ValueError("Key did not match (%s vs %s)" % (key, key2))
        return record

    def get(self, k, d=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
        try:
            return self.__getitem__(k)
        except KeyError:
            return d

    def get_raw(self, key):
        """Similar to the get method, but returns the record as a raw string.

        If the key is not found, a KeyError exception is raised.
        Note that on Python 3 a bytes string is returned, not a typical
        unicode string.
        NOTE - This functionality is not supported for every file format.
        """
        #Pass the offset to the proxy
        row = self._con.execute("SELECT file_number, offset, length FROM offset_data WHERE key=?;",
                                (key,)).fetchone()
        if not row:
            raise KeyError
        file_number, offset, length = row
        proxies = self._proxies
        if file_number in proxies:
            if length:
                #Shortcut if we have the length - read the bytes directly
                h = proxies[file_number]._handle
                h.seek(offset)
                return h.read(length)
            else:
                return proxies[file_number].get_raw(offset)
        else:
            #This code is duplicated from __getitem__ to avoid a function call
            if len(proxies) >= self._max_open:
                #Close an old handle...
                proxies.popitem()[1]._handle.close()
            #Open a new handle...
            proxy = _FormatToRandomAccess[self._format](
                self._filenames[file_number],
                self._format, self._alphabet)
            proxies[file_number] = proxy
            if length:
                #Shortcut if we have the length - read the bytes directly
                h = proxy._handle
                h.seek(offset)
                return h.read(length)
            else:
                return proxy.get_raw(offset)

    def close(self):
        """Close any open file handles."""
        proxies = self._proxies
        while proxies:
            proxies.popitem()[1]._handle.close()
##############################################################################
class SeqFileRandomAccess(object):
    """Base class for random access to records within a sequence file (PRIVATE).

    Subclasses must implement __iter__ (scanning the file for record keys
    and offsets); get and get_raw then re-parse single records on demand.
    """
    def __init__(self, filename, format, alphabet):
        self._handle = open(filename, "rb")
        self._alphabet = alphabet
        self._format = format
        #Load the parser class/function once and avoid the dict lookup in each
        #__getitem__ call:
        i = SeqIO._FormatToIterator[format]
        #The following alphabet code is a bit nasty... duplicates logic in
        #Bio.SeqIO.parse()
        if alphabet is None:
            def _parse(handle):
                """Dynamically generated parser function (PRIVATE)."""
                return i(handle).next()
        else:
            #TODO - Detect alphabet support ONCE at __init__
            def _parse(handle):
                """Dynamically generated parser function (PRIVATE)."""
                try:
                    return i(handle, alphabet=alphabet).next()
                except TypeError:
                    #Parser does not take an alphabet argument - force it
                    #onto the record afterwards instead.
                    return SeqIO._force_alphabet(i(handle),
                                                 alphabet).next()
        self._parse = _parse

    def __iter__(self):
        """Returns (id,offset) tuples."""
        raise NotImplementedError("Subclass should implement this")

    def get(self, offset):
        """Returns SeqRecord."""
        #Should be overridden for binary file formats etc:
        return self._parse(StringIO(_bytes_to_string(self.get_raw(offset))))

    def get_raw(self, offset):
        """Returns bytes string (if implemented for this file format)."""
        #Should be done by each sub-class (if possible)
        raise NotImplementedError("Not available for this file format.")
####################
# Special indexers #
####################
# Anything where the records cannot be read simply by parsing from
# the record start. For example, anything requiring information from
# a file header - e.g. SFF files where we would need to know the
# number of flows.
class SffRandomAccess(SeqFileRandomAccess):
    """Random access to a Standard Flowgram Format (SFF) file."""
    def __init__(self, filename, format, alphabet):
        SeqFileRandomAccess.__init__(self, filename, format, alphabet)
        #Cache the per-file flow information needed to parse any read.
        header_length, index_offset, index_length, number_of_reads, \
            self._flows_per_read, self._flow_chars, self._key_sequence \
            = SeqIO.SffIO._sff_file_header(self._handle)

    def __iter__(self):
        """Load any index block in the file, or build it the slow way (PRIVATE)."""
        if self._alphabet is None:
            self._alphabet = Alphabet.generic_dna
        handle = self._handle
        handle.seek(0)
        #Already did this in __init__ but need handle in right place
        header_length, index_offset, index_length, number_of_reads, \
            self._flows_per_read, self._flow_chars, self._key_sequence \
            = SeqIO.SffIO._sff_file_header(handle)
        if index_offset and index_length:
            #There is an index provided, try this the fast way:
            count = 0
            try:
                for name, offset in SeqIO.SffIO._sff_read_roche_index(handle):
                    yield name, offset, 0
                    count += 1
                assert count == number_of_reads, \
                    "Indexed %i records, expected %i" \
                    % (count, number_of_reads)
                return
            #Bug fix: "except ValueError, err" is a syntax error on
            #Python 3; the "as" form works on Python 2.6+ as well.
            except ValueError as err:
                import warnings
                warnings.warn("Could not parse the SFF index: %s" % err)
                assert count == 0, "Partially populated index"
                handle.seek(0)
        #We used to give a warning in this case, but Ion Torrent's
        #SFF files don't have an index so that would be annoying.
        #Fall back on the slow way!
        count = 0
        for name, offset in SeqIO.SffIO._sff_do_slow_index(handle):
            yield name, offset, 0
            count += 1
        assert count == number_of_reads, \
            "Indexed %i records, expected %i" % (count, number_of_reads)

    def get(self, offset):
        """Parse and return the SeqRecord starting at the given offset."""
        handle = self._handle
        handle.seek(offset)
        return SeqIO.SffIO._sff_read_seq_record(handle,
                                                self._flows_per_read,
                                                self._flow_chars,
                                                self._key_sequence,
                                                self._alphabet)

    def get_raw(self, offset):
        """Return the raw bytes of the record starting at the given offset."""
        handle = self._handle
        handle.seek(offset)
        return SeqIO.SffIO._sff_read_raw_record(handle, self._flows_per_read)
#NOTE(review): the class name keeps the historical "Trimed" (sic) spelling;
#it is referenced by the _FormatToRandomAccess table ("sff-trim" entry)
#so renaming it here would break that lookup.
class SffTrimedRandomAccess(SffRandomAccess) :
    def get(self, offset) :
        #As SffRandomAccess.get, but with trim=True so the parser returns
        #the quality-clipped read rather than the full sequence.
        handle = self._handle
        handle.seek(offset)
        return SeqIO.SffIO._sff_read_seq_record(handle,
                                                self._flows_per_read,
                                                self._flow_chars,
                                                self._key_sequence,
                                                self._alphabet,
                                                trim=True)
###################
# Simple indexers #
###################
class SequentialSeqFileRandomAccess(SeqFileRandomAccess):
    """Random access for formats where each record starts with a fixed marker."""
    def __init__(self, filename, format, alphabet):
        SeqFileRandomAccess.__init__(self, filename, format, alphabet)
        #Text marking the start of each record, per format.
        #Bug fix: the original table listed the "qual" key twice; the
        #duplicate dictionary key was redundant and has been removed.
        marker = {"ace": "CO ",
                  "embl": "ID ",
                  "fasta": ">",
                  "genbank": "LOCUS ",
                  "gb": "LOCUS ",
                  "imgt": "ID ",
                  "phd": "BEGIN_SEQUENCE",
                  "pir": ">..;",
                  "qual": ">",
                  "swiss": "ID ",
                  "uniprot-xml": "<entry ",
                  }[format]
        self._marker = marker
        self._marker_re = re.compile(_as_bytes("^%s" % marker))

    def __iter__(self):
        """Returns (id, offset, length) tuples."""
        marker_offset = len(self._marker)
        marker_re = self._marker_re
        handle = self._handle
        handle.seek(0)
        #Skip any header before first record
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line) or not line:
                break
        #Should now be at the start of a record, or end of the file
        while marker_re.match(line):
            #Here we can assume the record.id is the first word after the
            #marker. This is generally fine... but not for GenBank, EMBL, Swiss
            #(renamed from "id" to avoid shadowing the builtin)
            rec_id = line[marker_offset:].strip().split(None, 1)[0]
            while True:
                line = handle.readline()
                if marker_re.match(line) or not line:
                    end_offset = handle.tell() - len(line)
                    yield _bytes_to_string(rec_id), start_offset, end_offset - start_offset
                    start_offset = end_offset
                    break
        assert not line, repr(line)

    def get_raw(self, offset):
        """Similar to the get method, but returns the record as a raw string."""
        #For non-trivial file formats this must be over-ridden in the subclass
        handle = self._handle
        marker_re = self._marker_re
        handle.seek(offset)
        lines = [handle.readline()]
        while True:
            line = handle.readline()
            if marker_re.match(line) or not line:
                #End of file, or start of next record => end of this record
                break
            lines.append(line)
        return _as_bytes("").join(lines)
#######################################
# Fiddly indexers: GenBank, EMBL, ... #
#######################################
class GenBankRandomAccess(SequentialSeqFileRandomAccess):
    """Indexed dictionary like access to a GenBank file."""
    def __iter__(self):
        #Yields (key, start_offset, length) for each record; the key comes
        #from the ACCESSION line, or the VERSION line when it carries a
        #dotted version suffix (to mimic the GenBank parser's record.id).
        handle = self._handle
        handle.seek(0)
        marker_re = self._marker_re
        dot_char = _as_bytes(".")
        accession_marker = _as_bytes("ACCESSION ")
        version_marker = _as_bytes("VERSION ")
        #Skip any header before first record
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line) or not line:
                break
        #Should now be at the start of a record, or end of the file
        while marker_re.match(line):
            #We cannot assume the record.id is the first word after LOCUS,
            #normally the first entry on the VERSION or ACCESSION line is used.
            key = None
            while True:
                line = handle.readline()
                if marker_re.match(line) or not line:
                    if not key:
                        raise ValueError("Did not find ACCESSION/VERSION lines")
                    end_offset = handle.tell() - len(line)
                    yield _bytes_to_string(key), start_offset, end_offset - start_offset
                    start_offset = end_offset
                    break
                elif line.startswith(accession_marker):
                    key = line.rstrip().split()[1]
                elif line.startswith(version_marker):
                    version_id = line.rstrip().split()[1]
                    #Only use VERSION when it looks like accession.number
                    if version_id.count(dot_char)==1 and version_id.split(dot_char)[1].isdigit():
                        #This should mimic the GenBank parser...
                        key = version_id
        assert not line, repr(line)
class EmblRandomAccess(SequentialSeqFileRandomAccess):
    """Indexed dictionary like access to an EMBL file."""
    def __iter__(self):
        #Yields (key, start_offset, length); the key is the accession, with
        #the version appended when an SV field provides one.
        handle = self._handle
        handle.seek(0)
        marker_re = self._marker_re
        semi_char = _as_bytes(";")
        dot_char = _as_bytes(".")
        sv_marker = _as_bytes("SV ")
        #Skip any header before first record
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line) or not line:
                break
        #Should now be at the start of a record, or end of the file
        while marker_re.match(line):
            #We cannot assume the record.id is the first word after ID,
            #normally the SV line is used.
            if line[2:].count(semi_char) == 6:
                #Looks like the semi colon separated style introduced in 2006
                parts = line[3:].rstrip().split(semi_char)
                if parts[1].strip().startswith(sv_marker):
                    #The SV bit gives the version
                    key = parts[0].strip() + dot_char + parts[1].strip().split()[1]
                else:
                    key = parts[0].strip()
            elif line[2:].count(semi_char) == 3:
                #Looks like the pre 2006 style, take first word only
                key = line[3:].strip().split(None,1)[0]
            else:
                raise ValueError('Did not recognise the ID line layout:\n' + line)
            while True:
                line = handle.readline()
                if marker_re.match(line) or not line:
                    end_offset = handle.tell() - len(line)
                    yield _bytes_to_string(key), start_offset, end_offset - start_offset
                    start_offset = end_offset
                    break
                elif line.startswith(sv_marker):
                    #A standalone SV line overrides the key from the ID line
                    key = line.rstrip().split()[1]
        assert not line, repr(line)
class SwissRandomAccess(SequentialSeqFileRandomAccess):
    """Random access to a SwissProt file."""
    def __iter__(self):
        #Yields (key, start_offset, length); the key is the first accession
        #taken from the AC line immediately after each ID line.
        handle = self._handle
        handle.seek(0)
        marker_re = self._marker_re
        semi_char = _as_bytes(";")
        #Skip any header before first record
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line) or not line:
                break
        #Should now be at the start of a record, or end of the file
        while marker_re.match(line):
            #We cannot assume the record.id is the first word after ID,
            #normally the following AC line is used.
            line = handle.readline()
            assert line.startswith(_as_bytes("AC "))
            key = line[3:].strip().split(semi_char)[0].strip()
            while True:
                line = handle.readline()
                if marker_re.match(line) or not line:
                    end_offset = handle.tell() - len(line)
                    yield _bytes_to_string(key), start_offset, end_offset - start_offset
                    start_offset = end_offset
                    break
        assert not line, repr(line)
class UniprotRandomAccess(SequentialSeqFileRandomAccess):
    """Random access to a UniProt XML file."""
    def __iter__(self):
        #Yields (key, start_offset, length); the key is taken from the first
        #<accession>...</accession> element inside each <entry>.
        handle = self._handle
        handle.seek(0)
        marker_re = self._marker_re
        start_acc_marker = _as_bytes("<accession>")
        end_acc_marker = _as_bytes("</accession>")
        end_entry_marker = _as_bytes("</entry>")
        #Skip any header before first record
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line) or not line:
                break
        #Should now be at the start of a record, or end of the file
        while marker_re.match(line):
            #We expect the next line to be <accession>xxx</accession>
            #(possibly with leading spaces)
            #but allow it to be later on within the <entry>
            key = None
            #NOTE(review): 'done' is assigned but never read in this method
            done = False
            while True:
                line = handle.readline()
                if key is None and start_acc_marker in line:
                    assert end_acc_marker in line, line
                    #11 is len("<accession>"), slicing out the tag's value
                    key = line[line.find(start_acc_marker)+11:].split(_as_bytes("<"))[0]
                elif end_entry_marker in line:
                    #8 is len("</entry>"); the record ends just after it
                    end_offset = handle.tell() - len(line) \
                               + line.find(end_entry_marker) + 8
                    break
                elif marker_re.match(line) or not line:
                    #Start of next record or end of file
                    raise ValueError("Didn't find end of record")
            if not key:
                raise ValueError("Did not find <accession> line in bytes %i to %i" \
                                 % (start_offset, end_offset))
            yield _bytes_to_string(key), start_offset, end_offset - start_offset
            #Find start of next record
            while not marker_re.match(line) and line:
                start_offset = handle.tell()
                line = handle.readline()
        assert not line, repr(line)

    def get_raw(self, offset):
        """Similar to the get method, but returns the record as a raw string."""
        handle = self._handle
        marker_re = self._marker_re
        end_entry_marker = _as_bytes("</entry>")
        handle.seek(offset)
        data = handle.readline()
        while True:
            line = handle.readline()
            i = line.find(end_entry_marker)
            if i != -1:
                #Truncate just after the closing </entry> tag (i + 8 bytes)
                data += line[:i+8]
                break
            if marker_re.match(line) or not line:
                #End of file, or start of next record
                raise ValueError("Didn't find end of record")
            data += line
        return data

    def get(self, offset) :
        #TODO - Can we handle this directly in the parser?
        #This is a hack - use get_raw for <entry>...</entry> and wrap it with
        #the apparently required XML header and footer.
        data = """<?xml version='1.0' encoding='UTF-8'?>
        <uniprot xmlns="http://uniprot.org/uniprot"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://uniprot.org/uniprot
        http://www.uniprot.org/support/docs/uniprot.xsd">
        %s
        </uniprot>
        """ % _bytes_to_string(self.get_raw(offset))
        #TODO - For consistency, this function should not accept a string:
        return SeqIO.UniprotIO.UniprotIterator(data).next()
class IntelliGeneticsRandomAccess(SeqFileRandomAccess):
    """Random access to a IntelliGenetics file."""
    def __init__(self, filename, format, alphabet):
        SeqFileRandomAccess.__init__(self, filename, format, alphabet)
        #Records start with one or more comment lines beginning ";"
        self._marker_re = re.compile(_as_bytes("^;"))

    def __iter__(self):
        #Yields (key, offset, length) tuples; length is always 0 here (the
        #record end offset is not determined during the scan).
        handle = self._handle
        handle.seek(0)
        marker_re = self._marker_re
        semi_char = _as_bytes(";")
        while True:
            offset = handle.tell()
            line = handle.readline()
            if marker_re.match(line):
                #Now look for the first line which doesn't start ";"
                while True:
                    line = handle.readline()
                    if line[0:1] != semi_char and line.strip():
                        key = line.split()[0]
                        yield _bytes_to_string(key), offset, 0
                        break
                    if not line:
                        raise ValueError("Premature end of file?")
            elif not line:
                #End of file
                break

    def get_raw(self, offset):
        #Re-read the leading comment line(s), then the remaining lines of
        #the record up to the next record's comment block (or end of file).
        handle = self._handle
        handle.seek(offset)
        marker_re = self._marker_re
        lines = []
        line = handle.readline()
        semi_char = _as_bytes(";")
        while line.startswith(semi_char):
            lines.append(line)
            line = handle.readline()
        while line and not line.startswith(semi_char):
            lines.append(line)
            line = handle.readline()
        return _as_bytes("").join(lines)
class TabRandomAccess(SeqFileRandomAccess):
    """Random access to a simple tabbed file."""
    def __iter__(self):
        """Yield (key, offset, length) tuples, one per non-blank line."""
        handle = self._handle
        handle.seek(0)
        start_offset = handle.tell()
        tab_char = _as_bytes("\t")
        while True:
            line = handle.readline()
            if not line:
                break  #End of file
            if not line.strip():
                #Ignore blank lines.
                #Bug fix: the old code wrapped line.split(tab_char)[0] in
                #"except ValueError" to catch blank lines, but str.split
                #never raises ValueError for that call, so blank lines were
                #actually yielded with a whitespace key (and the except
                #clause used Python 2 only syntax). Test explicitly instead.
                start_offset = handle.tell()
                continue
            key = line.split(tab_char)[0]
            end_offset = handle.tell()
            yield _bytes_to_string(key), start_offset, end_offset - start_offset
            start_offset = end_offset

    def get_raw(self, offset):
        """Like the get method, but returns the record as a raw string."""
        handle = self._handle
        handle.seek(offset)
        return handle.readline()
##########################
# Now the FASTQ indexers #
##########################
class FastqRandomAccess(SeqFileRandomAccess):
    """Random access to a FASTQ file (any supported variant).

    With FASTQ the records all start with a "@" line, but so can quality lines.
    Note this will cope with line-wrapped FASTQ files.
    """
    def __iter__(self):
        """Yield (id, offset, length) tuples for each record in the file."""
        handle = self._handle
        handle.seek(0)
        rec_id = None  #renamed from "id" to avoid shadowing the builtin
        start_offset = handle.tell()
        line = handle.readline()
        if not line:
            #Empty file!
            return
        at_char = _as_bytes("@")
        plus_char = _as_bytes("+")
        if line[0:1] != at_char:
            raise ValueError("Problem with FASTQ @ line:\n%s" % repr(line))
        while line:
            #This record seems OK (so far)
            rec_id = line[1:].rstrip().split(None, 1)[0]
            #Find the seq line(s) - count their length so we know when the
            #quality string (which may also start "@") is complete.
            seq_len = 0
            while line:
                line = handle.readline()
                if line.startswith(plus_char):
                    break
                seq_len += len(line.strip())
            if not line:
                raise ValueError("Premature end of file in seq section")
            #Find the qual line(s)
            qual_len = 0
            while line:
                if seq_len == qual_len:
                    #Should be end of record...
                    line = handle.readline()
                    #Bug fix: the original constructed this ValueError but
                    #never raised it, silently accepting malformed records.
                    if line and line[0:1] != at_char:
                        raise ValueError("Problem with line %s" % repr(line))
                    break
                else:
                    line = handle.readline()
                    qual_len += len(line.strip())
            if seq_len != qual_len:
                raise ValueError("Problem with quality section")
            end_offset = handle.tell() - len(line)
            yield _bytes_to_string(rec_id), start_offset, end_offset - start_offset
            start_offset = end_offset

    def get_raw(self, offset):
        """Similar to the get method, but returns the record as a raw string."""
        #TODO - Refactor this and the __iter__ method to reduce code duplication?
        handle = self._handle
        handle.seek(offset)
        line = handle.readline()
        data = line
        at_char = _as_bytes("@")
        plus_char = _as_bytes("+")
        if line[0:1] != at_char:
            raise ValueError("Problem with FASTQ @ line:\n%s" % repr(line))
        #Find the seq line(s)
        seq_len = 0
        while line:
            line = handle.readline()
            data += line
            if line.startswith(plus_char):
                break
            seq_len += len(line.strip())
        if not line:
            raise ValueError("Premature end of file in seq section")
        assert line[0:1] == plus_char
        #Find the qual line(s)
        qual_len = 0
        while line:
            if seq_len == qual_len:
                #Should be end of record...
                line = handle.readline()
                #Bug fix: the original constructed this ValueError but
                #never raised it, silently accepting malformed records.
                #(Also removed the unused "pos" and "identifier" locals.)
                if line and line[0:1] != at_char:
                    raise ValueError("Problem with line %s" % repr(line))
                break
            else:
                line = handle.readline()
                data += line
                qual_len += len(line.strip())
        if seq_len != qual_len:
            raise ValueError("Problem with quality section")
        return data
###############################################################################
#Lookup table mapping each supported SeqIO format name to the proxy class
#used for random access indexing of that format.
_FormatToRandomAccess = {"ace" : SequentialSeqFileRandomAccess,
                         "embl" : EmblRandomAccess,
                         "fasta" : SequentialSeqFileRandomAccess,
                         "fastq" : FastqRandomAccess, #Class handles all three variants
                         "fastq-sanger" : FastqRandomAccess, #alias of the above
                         "fastq-solexa" : FastqRandomAccess,
                         "fastq-illumina" : FastqRandomAccess,
                         "genbank" : GenBankRandomAccess,
                         "gb" : GenBankRandomAccess, #alias of the above
                         "ig" : IntelliGeneticsRandomAccess,
                         "imgt" : EmblRandomAccess,
                         "phd" : SequentialSeqFileRandomAccess,
                         "pir" : SequentialSeqFileRandomAccess,
                         "sff" : SffRandomAccess,
                         "sff-trim" : SffTrimedRandomAccess,
                         "swiss" : SwissRandomAccess,
                         "tab" : TabRandomAccess,
                         "qual" : SequentialSeqFileRandomAccess,
                         "uniprot-xml" : UniprotRandomAccess,
                         }
| 41.710158 | 116 | 0.554396 |
7954c4e00a057f68f0ffefad12f42616bf1cc3e4 | 10,559 | py | Python | services/traction/bdd-tests/features/steps/credential_templates.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/bdd-tests/features/steps/credential_templates.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/bdd-tests/features/steps/credential_templates.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | import json
import time
from behave import *
from starlette import status
from v1_api import *
@then(
    'wait {timeout:d} seconds until "{tenant}" can create credential template for "{name}"'
)
def step_impl(
    context,
    timeout: int,
    tenant: str,
    name: str,
):
    """Poll until the tenant's schema template for *name* reaches 'Active'.

    Polls in up to 20 slices of the timeout (at least 1 second apart) and
    fails the scenario if the template is not Active within the timeout.
    """
    schema_template = context.config.userdata[tenant]["governance"]["schema_templates"][
        name
    ]
    ex_result_found = False
    #behave's {timeout:d} pattern already delivers an int, so the old
    #redundant timeout = int(timeout) cast has been removed.
    check_period = float(timeout / 20) if timeout > 20 else 1
    time_passed = 0
    resp_json = None  #defined even if the loop never runs (timeout <= 0)
    while time_passed < timeout:
        time.sleep(check_period)
        time_passed = time_passed + check_period
        response = get_schema_template(
            context, tenant, schema_template["schema_template_id"]
        )
        resp_json = json.loads(response.content)
        ex_result_found = resp_json["item"]["status"] == "Active"
        if ex_result_found:
            break
    assert (
        ex_result_found
    ), f"after {time_passed} seconds, schema_template found was {resp_json}"
    #Bug fix: the original printed the *remaining* time
    #(timeout - time_passed) while claiming it was the time polled.
    print(f"Polled for {time_passed} seconds")
@given('"{tenant}" creates credential template for "{name}" by schema_id')
def step_impl(context, tenant: str, name: str):
    """Create a credential template referencing the schema's ledger schema_id
    and store the resulting item in the tenant's governance userdata."""
    governance = context.config.userdata[tenant]["governance"]
    schema_template = governance["schema_templates"][name]
    cred_def_spec = {
        "tag": "default",
        "revocation_enabled": False,
        "revocation_registry_size": 0,
    }
    payload = {
        "credential_definition": cred_def_spec,
        "schema_id": schema_template["schema_id"],
        "name": name,
        "tags": [],
    }
    response = create_credential_template(context, tenant, payload)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    resp_json = json.loads(response.content)
    governance["credential_templates"][name] = resp_json["item"]
@given('"{tenant}" creates credential template for "{name}" by schema_template_id')
def step_impl(context, tenant: str, name: str):
    """Create a credential template referencing the Traction schema_template_id
    and store the resulting item in the tenant's governance userdata."""
    governance = context.config.userdata[tenant]["governance"]
    schema_template = governance["schema_templates"][name]
    cred_def_spec = {
        "tag": "default",
        "revocation_enabled": False,
        "revocation_registry_size": 0,
    }
    payload = {
        "credential_definition": cred_def_spec,
        "schema_template_id": schema_template["schema_template_id"],
        "name": name,
        "tags": [],
    }
    response = create_credential_template(context, tenant, payload)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    resp_json = json.loads(response.content)
    governance["credential_templates"][name] = resp_json["item"]
@given('"{tenant}" cannot create a credential template')
def step_impl(context, tenant: str):
    """Verify that an unauthorized tenant's create request is rejected (401)."""
    body = {
        "credential_definition": {
            "tag": "default",
            "revocation_enabled": False,
            "revocation_registry_size": 0,
        },
        "schema_id": "do not need one",
        "name": "cannot create",
        "tags": [],
    }
    response = create_credential_template(context, tenant, body)
    assert response.status_code == status.HTTP_401_UNAUTHORIZED, response.__dict__
@then('"{tenant}" can find credential template "{name}" by name')
def step_impl(context, tenant: str, name: str):
    """List templates filtered by name and expect exactly one match."""
    response = list_credential_templates(context, tenant, {"name": name})
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 1, data
    assert data["items"][0]["name"] == name
@then('"{tenant}" can find credential template "{name}" by tags "{tags}"')
def step_impl(context, tenant: str, name: str, tags: str):
    """Filter by comma-separated tags plus name; the single hit carries every tag."""
    query = {"tags": tags, "name": name}
    response = list_credential_templates(context, tenant, query)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 1, data
    found = data["items"][0]
    assert found["name"] == name
    for tag in (part.strip() for part in tags.split(",")):
        assert tag in found["tags"]
@then('"{tenant}" can find credential template "{name}" by schema_id')
def step_impl(context, tenant: str, name: str):
    """Look the template up via its schema's ledger schema_id."""
    schema_template = context.config.userdata[tenant]["governance"]["schema_templates"][
        name
    ]
    schema_id = schema_template["schema_id"]
    response = list_credential_templates(context, tenant, {"schema_id": schema_id})
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 1, data
    assert data["items"][0]["schema_id"] == schema_id
@then('"{tenant}" can find credential template "{name}" by cred_def_id')
def step_impl(context, tenant: str, name: str):
    """Look the template up via its ledger credential definition id."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    cred_def_id = template["cred_def_id"]
    response = list_credential_templates(context, tenant, {"cred_def_id": cred_def_id})
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 1, data
    assert data["items"][0]["cred_def_id"] == cred_def_id
@then('"{tenant}" cannot credential template find "{name}" by name')
def step_impl(context, tenant: str, name: str):
    """Expect a name search to come back empty (template not visible)."""
    response = list_credential_templates(context, tenant, {"name": name})
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 0, data
@then('"{tenant}" can find credential template "{name}" with deleted flag')
def step_impl(context, tenant: str, name: str):
    """With deleted=True, a soft-deleted template is still listed."""
    query = {"name": name, "deleted": True}
    response = list_credential_templates(context, tenant, query)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 1, data
    found = data["items"][0]
    assert found["name"] == name
    assert found["deleted"]
    assert found["status"] == "Deleted"
@then('"{tenant}" can get credential template "{name}" by credential_template_id')
def step_impl(context, tenant: str, name: str):
    """Fetch a single template by id and confirm the id round-trips."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    template_id = template["credential_template_id"]
    response = get_credential_template(context, tenant, template_id)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    item = json.loads(response.content)["item"]
    assert item["credential_template_id"] == template_id
@then('"{tenant}" can update credential template "{name}"')
def step_impl(context, tenant: str, name: str):
    """Update the template with attributes from the step table and verify
    every requested attribute is reflected in the response.

    The step table has ``attribute`` / ``value`` columns; a value for the
    ``tags`` attribute is a comma-separated list.
    """
    credential_template = context.config.userdata[tenant]["governance"][
        "credential_templates"
    ][name]
    template_id = credential_template["credential_template_id"]
    # Parse the behave table once (the original code duplicated this loop for
    # building the payload and again for the assertions).
    updates = {}
    for row in context.table:
        attribute = row["attribute"]
        value = row["value"]
        if attribute == "tags":
            value = row["value"].split(",")
        updates[attribute] = value
    payload = {"credential_template_id": template_id, **updates}
    response = update_credential_template(context, tenant, template_id, payload)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    item = json.loads(response.content)["item"]
    assert item["credential_template_id"] == template_id
    # Every updated attribute must come back with the requested value.
    for attribute, value in updates.items():
        assert item[attribute] == value
@then('"{tenant}" can delete credential template "{name}"')
def step_impl(context, tenant: str, name: str):
    """Soft-delete the template; the response must be flagged as deleted."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    template_id = template["credential_template_id"]
    response = delete_credential_template(context, tenant, template_id)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    item = json.loads(response.content)["item"]
    assert item["credential_template_id"] == template_id
    assert item["deleted"]
    assert item["status"] == "Deleted"
@then('"{tenant}" cannot find credential template "{name}" by cred_def_id')
def step_impl(context, tenant: str, name: str):
    """A cred_def_id search for a deleted template yields no items."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    query = {"cred_def_id": template["cred_def_id"]}
    response = list_credential_templates(context, tenant, query)
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    data = json.loads(response.content)
    assert len(data["items"]) == 0, data
@then('"{tenant}" cannot get credential template "{name}" by credential_template_id')
def step_impl(context, tenant: str, name: str):
    """Fetching a deleted template without the deleted flag is a 404."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    template_id = template["credential_template_id"]
    response = get_credential_template(context, tenant, template_id)
    assert response.status_code == status.HTTP_404_NOT_FOUND, response.__dict__
@then('"{tenant}" can get credential template "{name}" with deleted flag')
def step_impl(context, tenant: str, name: str):
    """With deleted=True, a soft-deleted template is retrievable by id."""
    template = context.config.userdata[tenant]["governance"]["credential_templates"][
        name
    ]
    template_id = template["credential_template_id"]
    response = get_credential_template(
        context, tenant, template_id, {"deleted": True}
    )
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    item = json.loads(response.content)["item"]
    assert item["credential_template_id"] == template_id
    assert item["deleted"]
    assert item["status"] == "Deleted"
| 36.285223 | 91 | 0.677053 |
7954c5017caa2553016030dcfd75a243e1c23592 | 6,347 | py | Python | async_retriever/print_versions.py | cheginit/async_retriever | ee90b7cc0b56664b46ec4723723ed52a346165e3 | [
"MIT"
] | 1 | 2022-03-07T23:40:48.000Z | 2022-03-07T23:40:48.000Z | async_retriever/print_versions.py | cheginit/async_retriever | ee90b7cc0b56664b46ec4723723ed52a346165e3 | [
"MIT"
] | 30 | 2021-05-12T06:23:21.000Z | 2022-03-22T08:14:41.000Z | async_retriever/print_versions.py | cheginit/async_retriever | ee90b7cc0b56664b46ec4723723ed52a346165e3 | [
"MIT"
] | null | null | null | """Utility functions for printing version information.
The original script is from
`xarray <https://github.com/pydata/xarray/blob/master/xarray/util/print_versions.py>`__
"""
import importlib
import locale
import os
import platform
import struct
import subprocess
import sys
from types import ModuleType
from typing import List, Optional, TextIO, Tuple
__all__ = ["show_versions"]
def get_sys_info() -> List[Tuple[str, Optional[str]]]:
    """Return system information as a dict.

    From https://github.com/numpy/numpy/blob/master/setup.py#L64-L89

    Returns
    -------
    list
        System information such as python version.
    """

    def _minimal_ext_cmd(cmd: List[str]) -> bytes:
        # Run the command in a stripped-down environment so the output
        # is stable regardless of the user's locale settings.
        env = {
            k: os.environ[k]
            for k in ("SYSTEMROOT", "PATH", "HOME")
            if os.environ.get(k) is not None
        }
        # LANGUAGE is used on win32
        env["LANGUAGE"] = "C"
        env["LANG"] = "C"
        env["LC_ALL"] = "C"
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)

    # Best-effort: record the current git commit if we are inside a repo.
    commit = None
    try:
        out = _minimal_ext_cmd(["git", "rev-parse", "HEAD"])
        commit = out.strip().decode("ascii")
    except (subprocess.SubprocessError, OSError):
        pass

    sysname, _, release, _, machine, processor = platform.uname()
    blob: List[Tuple[str, Optional[str]]] = [
        ("commit", commit),
        ("python", sys.version),
        ("python-bits", f"{struct.calcsize('P') * 8}"),
        ("OS", f"{sysname}"),
        ("OS-release", f"{release}"),
        ("machine", f"{machine}"),
        ("processor", f"{processor}"),
        ("byteorder", f"{sys.byteorder}"),
        ("LC_ALL", f'{os.environ.get("LC_ALL", "None")}'),
        ("LANG", f'{os.environ.get("LANG", "None")}'),
        ("LOCALE", ".".join(str(i) for i in locale.getlocale())),
    ]
    blob.extend(netcdf_and_hdf5_versions())
    return blob
def netcdf_and_hdf5_versions() -> List[Tuple[str, Optional[str]]]:
    """Report versions of the underlying HDF5 and netCDF C libraries.

    Prefers netCDF4 (which knows both versions); falls back to h5py for the
    HDF5 version only. Missing libraries are reported as ``None``.
    """
    versions: dict = {"libhdf5": None, "libnetcdf": None}
    try:
        import netCDF4

        versions["libhdf5"] = netCDF4.__hdf5libversion__
        versions["libnetcdf"] = netCDF4.__netcdf4libversion__
    except (ImportError, AttributeError):
        try:
            import h5py

            versions["libhdf5"] = h5py.version.hdf5_version
        except (ImportError, AttributeError):
            pass
    # Insertion order is preserved: [libhdf5, libnetcdf], same as before.
    return list(versions.items())
def show_versions(file: TextIO = sys.stdout) -> None:
    """Print versions of all the dependencies.

    Parameters
    ----------
    file : file-like, optional
        print to the given file-like object. Defaults to sys.stdout.
    """

    def _version(mod: ModuleType) -> str:
        # Default accessor: nearly every dependency exposes ``__version__``.
        # Previously this was repeated as ~40 identical lambdas.
        return mod.__version__

    # (module name, version accessor); packages with unusual version
    # attributes keep a bespoke lambda.
    deps = [
        # async_retriever
        ("async-retriever", _version),
        ("aiodns", _version),
        ("aiohttp", _version),
        ("aiohttp-client-cache", _version),
        ("aiosqlite", _version),
        ("brotli", _version),
        ("cchardet", _version),
        ("cytoolz", _version),
        ("ujson", _version),
        # pygeoogc
        ("pygeoogc", _version),
        ("defusedxml", _version),
        ("owslib", _version),
        ("pydantic", lambda mod: mod.version.VERSION),
        ("yaml", _version),
        ("pyproj", _version),
        ("requests", _version),
        ("requests-cache", _version),
        ("shapely", _version),
        ("urllib3", _version),
        # pygeoutils
        ("pygeoutils", _version),
        ("dask", _version),
        ("geopandas", _version),
        ("netCDF4", _version),
        ("numpy", _version),
        ("rasterio", _version),
        ("xarray", _version),
        ("rioxarray", _version),
        # py3dep
        ("py3dep", _version),
        ("click", _version),
        ("scipy", _version),
        ("richdem", lambda mod: mod.pkg_resources.require("richdem")[0].version),
        # pynhd
        ("pynhd", _version),
        ("networkx", _version),
        ("pandas", _version),
        ("pyarrow", _version),
        # pygeohydro
        ("pygeohydro", _version),
        ("folium", _version),
        ("lxml", _version),
        ("matplotlib", _version),
        # pydaymet
        ("pydaymet", _version),
        # misc
        ("bottleneck", _version),
        ("pygeos", _version),
        ("tables", _version),
        # test
        ("pytest", _version),
        ("pytest-cov", _version),
        ("xdist", _version),
    ]

    deps_blob: List[Tuple[str, Optional[str]]] = []
    for (modname, ver_f) in deps:
        try:
            mod = _get_mod(modname)
        except ModuleNotFoundError:
            # Not installed: record it with a None version.
            deps_blob.append((modname, None))
        else:
            try:
                ver = ver_f(mod)  # type: ignore
            except (NotImplementedError, AttributeError):
                # Installed but the version attribute is missing/unreadable.
                ver = "installed"
            deps_blob.append((modname, ver))

    print("\nINSTALLED VERSIONS", file=file)
    print("------------------", file=file)
    for k, stat in get_sys_info():
        print(f"{k}: {stat}", file=file)
    print("", file=file)
    for k, stat in sorted(deps_blob):
        print(f"{k}: {stat}", file=file)
def _get_mod(modname: str) -> ModuleType:
if modname in sys.modules:
return sys.modules[modname]
try:
return importlib.import_module(modname)
except ModuleNotFoundError:
return importlib.import_module(modname.replace("-", "_"))
| 33.582011 | 87 | 0.587679 |
7954c5a2082e0c55c3b0ecc89bb16db04c0bd912 | 6,681 | py | Python | bindings/python/ensmallen_graph/datasets/string/hyperthermusbutylicus.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/hyperthermusbutylicus.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/hyperthermusbutylicus.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Hyperthermus butylicus.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:46:39.456495
The undirected graph Hyperthermus butylicus has 1576 nodes and 82637 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06658 and has 19 connected components, where the component with most
nodes has 1536 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 92, the mean node degree is 104.87, and
the node degree mode is 1. The top 5 most central nodes are 415426.Hbut_0939
(degree 508), 415426.Hbut_0946 (degree 489), 415426.Hbut_1638 (degree 488),
415426.Hbut_1573 (degree 480) and 415426.Hbut_0967 (degree 478).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import HyperthermusButylicus
# Then load the graph
graph = HyperthermusButylicus()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def HyperthermusButylicus(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Hyperthermus butylicus graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Hyperthermus butylicus graph.

    Report
    ---------------------
    At the time of rendering these methods (please see datetime below), the graph
    had the following characteristics:

    Datetime: 2021-02-02 20:46:39.456495

    The undirected graph Hyperthermus butylicus has 1576 nodes and 82637 weighted
    edges, of which none are self-loops. The graph is dense as it has a density
    of 0.06658 and has 19 connected components, where the component with most
    nodes has 1536 nodes and the component with the least nodes has 2 nodes.
    The graph median node degree is 92, the mean node degree is 104.87, and
    the node degree mode is 1. The top 5 most central nodes are 415426.Hbut_0939
    (degree 508), 415426.Hbut_0946 (degree 489), 415426.Hbut_1638 (degree 488),
    415426.Hbut_1573 (degree 480) and 415426.Hbut_0967 (degree 478).

    References
    ---------------------
    Please cite the following if you use the data:

    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }

    Usage example
    ----------------------
    The usage of this graph is relatively straightforward:

    .. code:: python

        # First import the function to retrieve the graph from the datasets
        from ensmallen_graph.datasets.string import HyperthermusButylicus

        # Then load the graph
        graph = HyperthermusButylicus()

        # Finally, you can do anything with it, for instance, compute its report:
        print(graph)

        # If you need to run a link prediction task with validation,
        # you can split the graph using a connected holdout as follows:
        train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split the holdout, for example.
            train_size=0.8,
            # The random state is used to reproduce the holdout.
            random_state=42,
            # Whether to show a loading bar.
            verbose=True
        )

        # Remember that, if you need, you can enable the memory-time trade-offs:
        train_graph.enable(
            vector_sources=True,
            vector_destinations=True,
            vector_outbounds=True
        )

        # Consider using the methods made available in the Embiggen package
        # to run graph embedding or link prediction tasks.
    """
    # Build the retriever for this dataset and immediately invoke it so the
    # caller receives the constructed graph, not the retriever object.
    return AutomaticallyRetrievedGraph(
        graph_name="HyperthermusButylicus",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 35.349206 | 223 | 0.703488 |
7954c5aa04cecfe82f11661190356497e83ee323 | 23,842 | py | Python | lib/modeling/model_builder.py | mbajaj01/seg_every_thing | 2593487754daf625a951f1e417f6389a211bd004 | [
"Apache-2.0"
] | 429 | 2018-04-28T00:01:57.000Z | 2021-12-18T12:53:22.000Z | lib/modeling/model_builder.py | mbajaj01/seg_every_thing | 2593487754daf625a951f1e417f6389a211bd004 | [
"Apache-2.0"
] | 13 | 2018-07-20T03:07:12.000Z | 2022-02-20T10:26:20.000Z | lib/modeling/model_builder.py | mbajaj01/seg_every_thing | 2593487754daf625a951f1e417f6389a211bd004 | [
"Apache-2.0"
] | 82 | 2018-04-28T06:15:53.000Z | 2022-02-17T04:16:21.000Z | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Detectron model construction functions.
Detectron supports a large number of model types. The configuration space is
large. To get a sense, a given model is in element in the cartesian product of:
- backbone (e.g., VGG16, ResNet, ResNeXt)
- FPN (on or off)
- RPN only (just proposals)
- Fixed proposals for Fast R-CNN, RFCN, Mask R-CNN (with or without keypoints)
- End-to-end model with RPN + Fast R-CNN (i.e., Faster R-CNN), Mask R-CNN, ...
- Different "head" choices for the model
- ... many configuration options ...
A given model is made by combining many basic components. The result is flexible
though somewhat complex to understand at first.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import importlib
import logging
from caffe2.python import core
from caffe2.python import workspace
from core.config import cfg
from modeling.detector import DetectionModelHelper
from roi_data.loader import RoIDataLoader
import modeling.fast_rcnn_heads as fast_rcnn_heads
import modeling.keypoint_rcnn_heads as keypoint_rcnn_heads
import modeling.mask_rcnn_heads as mask_rcnn_heads
import modeling.name_compat
import modeling.optimizer as optim
import modeling.retinanet_heads as retinanet_heads
import modeling.rfcn_heads as rfcn_heads
import modeling.rpn_heads as rpn_heads
import roi_data.minibatch
import utils.c2 as c2_utils
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------- #
# Generic recomposable model builders
#
# For example, you can create a Fast R-CNN model with the ResNet-50-C4 backbone
# with the configuration:
#
# MODEL:
# TYPE: generalized_rcnn
# CONV_BODY: ResNet.add_ResNet50_conv4_body
# ROI_HEAD: ResNet.add_ResNet_roi_conv5_head
# ---------------------------------------------------------------------------- #
def generalized_rcnn(model):
    """Build a model from the generalized R-CNN family.

    This model type handles:
      - Fast R-CNN
      - RPN only (not integrated with Fast R-CNN)
      - Faster R-CNN (stagewise training from NIPS paper)
      - Faster R-CNN (end-to-end joint training)
      - Mask R-CNN (stagewise training from NIPS paper)
      - Mask R-CNN (end-to-end joint training)
    """
    # Resolve the configured builder functions up front for readability.
    conv_body = get_func(cfg.MODEL.CONV_BODY)
    box_head = get_func(cfg.FAST_RCNN.ROI_BOX_HEAD)
    mask_head = get_func(cfg.MRCNN.ROI_MASK_HEAD)
    keypoint_head = get_func(cfg.KRCNN.ROI_KEYPOINTS_HEAD)
    return build_generic_detection_model(
        model,
        conv_body,
        add_roi_box_head_func=box_head,
        add_roi_mask_head_func=mask_head,
        add_roi_keypoint_head_func=keypoint_head,
        freeze_conv_body=cfg.TRAIN.FREEZE_CONV_BODY
    )
def rfcn(model):
    """Build an R-FCN model from the configured conv body."""
    # TODO(rbg): fold into build_generic_detection_model
    conv_body = get_func(cfg.MODEL.CONV_BODY)
    return build_generic_rfcn_model(model, conv_body)
def retinanet(model):
    """Build a RetinaNet model from the configured conv body."""
    # TODO(rbg): fold into build_generic_detection_model
    conv_body = get_func(cfg.MODEL.CONV_BODY)
    return build_generic_retinanet_model(model, conv_body)
# ---------------------------------------------------------------------------- #
# Helper functions for building various re-usable network bits
# ---------------------------------------------------------------------------- #
def create(model_type_func, train=False, gpu_id=0):
    """Generic model creation function that dispatches to specific model
    building functions.

    By default, this function will generate a data parallel model configured to
    run on cfg.NUM_GPUS devices. However, you can restrict it to build a model
    targeted to a specific GPU by specifying gpu_id. This is used by
    optimizer.build_data_parallel_model() during test time.
    """
    helper = DetectionModelHelper(
        name=model_type_func,
        train=train,
        num_classes=cfg.MODEL.NUM_CLASSES,
        init_params=train
    )
    helper.only_build_forward_pass = False
    helper.target_gpu_id = gpu_id
    # Dispatch to the model-type-specific builder (e.g. generalized_rcnn).
    builder = get_func(model_type_func)
    return builder(helper)
def get_func(func_name):
    """Helper to return a function object by name. func_name must identify a
    function in this module or the path to a function relative to the base
    'modeling' module.

    Returns None for an empty name. Raises if the function cannot be found.
    """
    if func_name == '':
        return None
    new_func_name = modeling.name_compat.get_new_name(func_name)
    if new_func_name != func_name:
        # Fix: Logger.warn() is a deprecated alias of Logger.warning().
        logger.warning(
            'Remapping old function name: {} -> {}'.
            format(func_name, new_func_name)
        )
        func_name = new_func_name
    try:
        parts = func_name.split('.')
        # Refers to a function in this module
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise, assume we're referencing a module under modeling
        module_name = 'modeling.' + '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except Exception:
        logger.error('Failed to find function: {}'.format(func_name))
        raise
def build_generic_detection_model(
    model,
    add_conv_body_func,
    add_roi_box_head_func=None,
    add_roi_mask_head_func=None,
    add_roi_keypoint_head_func=None,
    freeze_conv_body=False
):
    """Compose a detection model from a conv body plus optional RPN, box,
    mask, and keypoint heads, replicated across GPUs by the optimizer.

    Returns the (mutated) `model`.
    """
    def _single_gpu_build_func(model):
        """Build the model on a single GPU. Can be called in a loop over GPUs
        with name and device scoping to create a data parallel model.
        """
        # Add the conv body (called "backbone architecture" in papers)
        # E.g., ResNet-50, ResNet-50-FPN, ResNeXt-101-FPN, etc.
        blob_conv, dim_conv, spatial_scale_conv = add_conv_body_func(model)
        if freeze_conv_body:
            for b in c2_utils.BlobReferenceList(blob_conv):
                model.StopGradient(b, b)

        if not model.train:  # == inference
            # Create a net that can be used to execute the conv body on an image
            # (without also executing RPN or any other network heads)
            model.conv_body_net = model.net.Clone('conv_body_net')

        head_loss_gradients = {
            'rpn': None,
            'box': None,
            'mask': None,
            'keypoints': None,
        }

        if cfg.RPN.RPN_ON:
            # Add the RPN head
            head_loss_gradients['rpn'] = rpn_heads.add_generic_rpn_outputs(
                model, blob_conv, dim_conv, spatial_scale_conv
            )

        if cfg.FPN.FPN_ON:
            # After adding the RPN head, restrict FPN blobs and scales to
            # those used in the RoI heads
            blob_conv, spatial_scale_conv = _narrow_to_fpn_roi_levels(
                blob_conv, spatial_scale_conv
            )

        if not cfg.MODEL.RPN_ONLY:
            # Add the Fast R-CNN head
            head_loss_gradients['box'] = _add_fast_rcnn_head(
                model, add_roi_box_head_func, blob_conv, dim_conv,
                spatial_scale_conv
            )

        if cfg.MODEL.MASK_ON:
            # Add the mask head
            head_loss_gradients['mask'] = _add_roi_mask_head(
                model, add_roi_mask_head_func, blob_conv, dim_conv,
                spatial_scale_conv
            )

        if cfg.MODEL.KEYPOINTS_ON:
            # Add the keypoint head. Fix: this previously stored under the key
            # 'keypoint', leaving the pre-seeded 'keypoints' entry forever
            # None; use the same key as the initializer above. (Behavior is
            # unchanged since only .values() is iterated below.)
            head_loss_gradients['keypoints'] = _add_roi_keypoint_head(
                model, add_roi_keypoint_head_func, blob_conv, dim_conv,
                spatial_scale_conv
            )

        if model.train:
            loss_gradients = {}
            if cfg.TRAIN.TRAIN_MASK_HEAD_ONLY:
                # Only the mask head is being trained; ignore other heads.
                loss_gradients.update(head_loss_gradients['mask'])
            else:
                for lg in head_loss_gradients.values():
                    if lg is not None:
                        loss_gradients.update(lg)
            return loss_gradients
        else:
            return None

    optim.build_data_parallel_model(model, _single_gpu_build_func)
    return model
def _narrow_to_fpn_roi_levels(blobs, spatial_scales):
    """Return only the blobs and spatial scales that will be used for RoI heads.

    Inputs `blobs` and `spatial_scales` may include extra blobs and scales that
    are used for RPN proposals, but not for RoI heads.
    """
    # Code only supports case when RPN and ROI min levels are the same
    assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
    # RPN max level can be >= to ROI max level
    assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
    # Blobs are ordered from max/coarsest level to min/finest level, so any
    # extra RPN-only levels are at the front; keep only the trailing RoI
    # levels.
    num_roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1
    keep = slice(-num_roi_levels, None)
    return blobs[keep], spatial_scales[keep]
def _add_fast_rcnn_head(
    model, add_roi_box_head_func, blob_in, dim_in, spatial_scale_in
):
    """Add a Fast R-CNN head to the model.

    Returns the loss gradients when training (unless only the mask head is
    being trained), otherwise None.
    """
    head_blob, head_dim = add_roi_box_head_func(
        model, blob_in, dim_in, spatial_scale_in
    )
    fast_rcnn_heads.add_fast_rcnn_outputs(model, head_blob, head_dim)
    if model.train and not cfg.TRAIN.TRAIN_MASK_HEAD_ONLY:
        return fast_rcnn_heads.add_fast_rcnn_losses(model)
    return None
def _add_roi_mask_head(
    model, add_roi_mask_head_func, blob_in, dim_in, spatial_scale_in
):
    """Add a mask prediction head to the model.

    At inference time the mask ops are split off into a separate
    `model.mask_net` (run after box predictions); `model.net` is restored to
    the pre-mask (bbox-only) graph. Returns loss gradients when training,
    otherwise None.
    """
    # Capture model graph before adding the mask head
    bbox_net = copy.deepcopy(model.net.Proto())
    # Add the mask head
    blob_mask_head, dim_mask_head = add_roi_mask_head_func(
        model, blob_in, dim_in, spatial_scale_in
    )
    # Add the mask output
    blob_mask = mask_rcnn_heads.add_mask_rcnn_outputs(
        model, blob_mask_head, dim_mask_head
    )
    if not model.train:  # == inference
        # Inference uses a cascade of box predictions, then mask predictions.
        # This requires separate nets for box and mask prediction.
        # So we extract the mask prediction net, store it as its own network,
        # then restore model.net to be the bbox-only network
        model.mask_net, blob_mask = c2_utils.SuffixNet(
            'mask_net', model.net, len(bbox_net.op), blob_mask
        )
        model.net._net = bbox_net
        loss_gradients = None
    else:
        loss_gradients = mask_rcnn_heads.add_mask_rcnn_losses(model, blob_mask)
    return loss_gradients
def _add_roi_keypoint_head(
    model, add_roi_keypoint_head_func, blob_in, dim_in, spatial_scale_in
):
    """Add a keypoint prediction head to the model.

    At inference time the keypoint ops are split off into a separate
    `model.keypoint_net` (run after box predictions); `model.net` is restored
    to the pre-keypoint (bbox-only) graph. Returns loss gradients when
    training (unless only the mask head is being trained), otherwise None.
    """
    # Capture model graph before adding the mask head
    bbox_net = copy.deepcopy(model.net.Proto())
    # Add the keypoint head
    blob_keypoint_head, dim_keypoint_head = add_roi_keypoint_head_func(
        model, blob_in, dim_in, spatial_scale_in
    )
    # Add the keypoint output
    blob_keypoint = keypoint_rcnn_heads.add_keypoint_outputs(
        model, blob_keypoint_head, dim_keypoint_head
    )
    if not model.train:  # == inference
        # Inference uses a cascade of box predictions, then keypoint predictions
        # This requires separate nets for box and keypoint prediction.
        # So we extract the keypoint prediction net, store it as its own
        # network, then restore model.net to be the bbox-only network
        model.keypoint_net, keypoint_blob_out = c2_utils.SuffixNet(
            'keypoint_net', model.net, len(bbox_net.op), blob_keypoint
        )
        model.net._net = bbox_net
        loss_gradients = None
    elif not cfg.TRAIN.TRAIN_MASK_HEAD_ONLY:
        loss_gradients = keypoint_rcnn_heads.add_keypoint_losses(model)
    else:
        loss_gradients = None
    return loss_gradients
def build_generic_rfcn_model(model, add_conv_body_func, dim_reduce=None):
    """Build an R-FCN model: conv body plus R-FCN outputs, replicated across
    GPUs by the optimizer. Returns the (mutated) `model`.
    """
    # TODO(rbg): fold this function into build_generic_detection_model
    def _single_gpu_build_func(model):
        """Builds the model on a single GPU. Can be called in a loop over GPUs
        with name and device scoping to create a data parallel model."""
        blob, dim, spatial_scale = add_conv_body_func(model)
        if not model.train:
            # Keep a conv-body-only net for running the backbone in isolation.
            model.conv_body_net = model.net.Clone('conv_body_net')
        rfcn_heads.add_rfcn_outputs(model, blob, dim, dim_reduce, spatial_scale)
        if model.train:
            loss_gradients = fast_rcnn_heads.add_fast_rcnn_losses(model)
        # At inference `loss_gradients` is never assigned, but the conditional
        # short-circuits to None before it would be read.
        return loss_gradients if model.train else None

    optim.build_data_parallel_model(model, _single_gpu_build_func)
    return model
def build_generic_retinanet_model(
    model, add_conv_body_func, freeze_conv_body=False
):
    """Build a RetinaNet model: FPN conv body plus RetinaNet outputs,
    replicated across GPUs by the optimizer. Returns the (mutated) `model`.

    Note: `freeze_conv_body` is accepted for signature parity but is not used
    anywhere in this builder.
    """
    # TODO(rbg): fold this function into build_generic_detection_model
    def _single_gpu_build_func(model):
        """Builds the model on a single GPU. Can be called in a loop over GPUs
        with name and device scoping to create a data parallel model."""
        blobs, dim, spatial_scales = add_conv_body_func(model)
        if not model.train:
            # Keep a conv-body-only net for running the backbone in isolation.
            model.conv_body_net = model.net.Clone('conv_body_net')
        retinanet_heads.add_fpn_retinanet_outputs(
            model, blobs, dim, spatial_scales
        )
        if model.train:
            loss_gradients = retinanet_heads.add_fpn_retinanet_losses(
                model
            )
        return loss_gradients if model.train else None

    optim.build_data_parallel_model(model, _single_gpu_build_func)
    return model
# ---------------------------------------------------------------------------- #
# Network inputs
# ---------------------------------------------------------------------------- #
def add_training_inputs(model, roidb=None):
    """Create network input ops and blobs used for training. To be called
    *after* model_builder.create().

    If roidb is given, a RoIDataLoader is created and attached to the model;
    otherwise an existing model.roi_data_loader is assumed.
    """
    # Implementation notes:
    #   Typically, one would create the input ops and then the rest of the net.
    #   However, creating the input ops depends on loading the dataset, which
    #   can take a few minutes for COCO.
    #   We prefer to avoid waiting so debugging can fail fast.
    #   Thus, we create the net *without input ops* prior to loading the
    #   dataset, and then add the input ops after loading the dataset.
    #   Since we defer input op creation, we need to do a little bit of surgery
    #   to place the input ops at the start of the network op list.
    assert model.train, 'Training inputs can only be added to a trainable model'
    if roidb is not None:
        # To make debugging easier you can set cfg.DATA_LOADER.NUM_THREADS = 1
        model.roi_data_loader = RoIDataLoader(
            roidb, num_loaders=cfg.DATA_LOADER.NUM_THREADS
        )
    orig_num_op = len(model.net._net.op)
    blob_names = roi_data.minibatch.get_minibatch_blob_names(
        is_training=True
    )
    # One DequeueBlobs op per GPU, each in its own name/device scope.
    for gpu_id in range(cfg.NUM_GPUS):
        with c2_utils.NamedCudaScope(gpu_id):
            for blob_name in blob_names:
                workspace.CreateBlob(core.ScopedName(blob_name))
            model.net.DequeueBlobs(
                model.roi_data_loader._blobs_queue_name, blob_names
            )
    # A little op surgery to move input ops to the start of the net:
    # the ops appended above (the last `diff` ops) are rotated to the front.
    diff = len(model.net._net.op) - orig_num_op
    new_op = model.net._net.op[-diff:] + model.net._net.op[:-diff]
    del model.net._net.op[:]
    model.net._net.op.extend(new_op)
def add_inference_inputs(model):
    """Create the (uninitialized) input blobs each inference net expects."""

    def _ensure_input_blobs(net_def):
        # Every op input must exist in the workspace before the net can run;
        # create any blob that is not already present.
        for op in net_def.op:
            for input_name in op.input:
                if not workspace.HasBlob(input_name):
                    workspace.CreateBlob(input_name)

    # Main detection net first, then the optional cascade stages.
    nets = [model.net]
    if cfg.MODEL.MASK_ON:
        nets.append(model.mask_net)
    if cfg.MODEL.KEYPOINTS_ON:
        nets.append(model.keypoint_net)
    for net in nets:
        _ensure_input_blobs(net.Proto())
# ---------------------------------------------------------------------------- #
# ********************** DEPRECATED FUNCTIONALITY BELOW ********************** #
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# Hardcoded functions to create various types of common models
#
# *** This type of model definition is deprecated ***
# *** Use the generic composable versions instead ***
#
# ---------------------------------------------------------------------------- #
import modeling.ResNet as ResNet
import modeling.VGG16 as VGG16
import modeling.VGG_CNN_M_1024 as VGG_CNN_M_1024
# --- Deprecated model-type aliases -------------------------------------------
# Each alias logs a deprecation warning and delegates to generalized_rcnn();
# the behavior each used to select is now driven by MODEL.* config flags.
def fast_rcnn(model):
    """Deprecated alias for generalized_rcnn."""
    logger.warn('Deprecated: use `MODEL.TYPE: generalized_rcnn`.')
    return generalized_rcnn(model)
def mask_rcnn(model):
    """Deprecated alias for generalized_rcnn with the mask head enabled."""
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.MASK_ON: True`'
    )
    return generalized_rcnn(model)
def keypoint_rcnn(model):
    """Deprecated alias for generalized_rcnn with the keypoint head enabled."""
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.KEYPOINTS_ON: True`'
    )
    return generalized_rcnn(model)
def mask_and_keypoint_rcnn(model):
    """Deprecated alias for generalized_rcnn with mask + keypoint heads."""
    # Fixed misplaced backticks in the deprecation message (was
    # '`MODEL.MASK_ON: True and ``MODEL.KEYPOINTS_ON: True`').
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.MASK_ON: True` and `MODEL.KEYPOINTS_ON: True`'
    )
    return generalized_rcnn(model)
def rpn(model):
    """Deprecated alias for generalized_rcnn in RPN-only mode."""
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.RPN_ONLY: True`'
    )
    return generalized_rcnn(model)
def fpn_rpn(model):
    """Deprecated alias for RPN-only generalized_rcnn with FPN."""
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.RPN_ONLY: True` and FPN enabled via configs'
    )
    return generalized_rcnn(model)
def faster_rcnn(model):
    """Deprecated alias for generalized_rcnn in Faster R-CNN mode."""
    logger.warn(
        'Deprecated: use `MODEL.TYPE: generalized_rcnn` with '
        '`MODEL.FASTER_RCNN: True`'
    )
    return generalized_rcnn(model)
# --- Deprecated frozen-conv-body presets -------------------------------------
# These presets pass freeze_conv_body=True explicitly; the supported way is
# the TRAIN.FREEZE_CONV_BODY config flag.
def fast_rcnn_frozen_features(model):
    logger.warn('Deprecated: use `TRAIN.FREEZE_CONV_BODY: True` instead')
    return build_generic_detection_model(
        model,
        get_func(cfg.MODEL.CONV_BODY),
        add_roi_box_head_func=get_func(cfg.FAST_RCNN.ROI_BOX_HEAD),
        freeze_conv_body=True
    )
def rpn_frozen_features(model):
    logger.warn('Deprecated: use `TRAIN.FREEZE_CONV_BODY: True` instead')
    return build_generic_detection_model(
        model, get_func(cfg.MODEL.CONV_BODY), freeze_conv_body=True
    )
def fpn_rpn_frozen_features(model):
    logger.warn('Deprecated: use `TRAIN.FREEZE_CONV_BODY: True` instead')
    return build_generic_detection_model(
        model, get_func(cfg.MODEL.CONV_BODY), freeze_conv_body=True
    )
def mask_rcnn_frozen_features(model):
    logger.warn('Deprecated: use `TRAIN.FREEZE_CONV_BODY: True` instead')
    return build_generic_detection_model(
        model,
        get_func(cfg.MODEL.CONV_BODY),
        add_roi_box_head_func=get_func(cfg.FAST_RCNN.ROI_BOX_HEAD),
        add_roi_mask_head_func=get_func(cfg.MRCNN.ROI_MASK_HEAD),
        freeze_conv_body=True
    )
def keypoint_rcnn_frozen_features(model):
    logger.warn('Deprecated: use `TRAIN.FREEZE_CONV_BODY: True` instead')
    return build_generic_detection_model(
        model,
        get_func(cfg.MODEL.CONV_BODY),
        add_roi_box_head_func=get_func(cfg.FAST_RCNN.ROI_BOX_HEAD),
        add_roi_keypoint_head_func=get_func(cfg.KRCNN.ROI_KEYPOINTS_HEAD),
        freeze_conv_body=True
    )
# ---------------------------------------------------------------------------- #
# Fast R-CNN models
# ---------------------------------------------------------------------------- #
# --- Deprecated hardcoded Fast R-CNN presets (backbone + RoI head pairs) -----
def VGG_CNN_M_1024_fast_rcnn(model):
    return build_generic_detection_model(
        model, VGG_CNN_M_1024.add_VGG_CNN_M_1024_conv5_body,
        VGG_CNN_M_1024.add_VGG_CNN_M_1024_roi_fc_head
    )
def VGG16_fast_rcnn(model):
    return build_generic_detection_model(
        model, VGG16.add_VGG16_conv5_body, VGG16.add_VGG16_roi_fc_head
    )
def ResNet50_fast_rcnn(model):
    return build_generic_detection_model(
        model, ResNet.add_ResNet50_conv4_body, ResNet.add_ResNet_roi_conv5_head
    )
def ResNet101_fast_rcnn(model):
    return build_generic_detection_model(
        model, ResNet.add_ResNet101_conv4_body, ResNet.add_ResNet_roi_conv5_head
    )
def ResNet50_fast_rcnn_frozen_features(model):
    return build_generic_detection_model(
        model,
        ResNet.add_ResNet50_conv4_body,
        ResNet.add_ResNet_roi_conv5_head,
        freeze_conv_body=True
    )
def ResNet101_fast_rcnn_frozen_features(model):
    return build_generic_detection_model(
        model,
        ResNet.add_ResNet101_conv4_body,
        ResNet.add_ResNet_roi_conv5_head,
        freeze_conv_body=True
    )
# ---------------------------------------------------------------------------- #
# RPN-only models
# ---------------------------------------------------------------------------- #
# --- Deprecated hardcoded RPN-only presets (conv body only, no RoI head) -----
def VGG_CNN_M_1024_rpn(model):
    return build_generic_detection_model(
        model, VGG_CNN_M_1024.add_VGG_CNN_M_1024_conv5_body
    )
def VGG16_rpn(model):
    return build_generic_detection_model(model, VGG16.add_VGG16_conv5_body)
def ResNet50_rpn_conv4(model):
    return build_generic_detection_model(model, ResNet.add_ResNet50_conv4_body)
def ResNet101_rpn_conv4(model):
    return build_generic_detection_model(model, ResNet.add_ResNet101_conv4_body)
def VGG_CNN_M_1024_rpn_frozen_features(model):
    return build_generic_detection_model(
        model,
        VGG_CNN_M_1024.add_VGG_CNN_M_1024_conv5_body,
        freeze_conv_body=True
    )
def VGG16_rpn_frozen_features(model):
    return build_generic_detection_model(
        model, VGG16.add_VGG16_conv5_body, freeze_conv_body=True
    )
def ResNet50_rpn_conv4_frozen_features(model):
    return build_generic_detection_model(
        model, ResNet.add_ResNet50_conv4_body, freeze_conv_body=True
    )
def ResNet101_rpn_conv4_frozen_features(model):
    return build_generic_detection_model(
        model, ResNet.add_ResNet101_conv4_body, freeze_conv_body=True
    )
# ---------------------------------------------------------------------------- #
# Faster R-CNN models
# ---------------------------------------------------------------------------- #
# --- Deprecated hardcoded Faster R-CNN and R-FCN presets ---------------------
def VGG16_faster_rcnn(model):
    assert cfg.MODEL.FASTER_RCNN
    return build_generic_detection_model(
        model, VGG16.add_VGG16_conv5_body, VGG16.add_VGG16_roi_fc_head
    )
def ResNet50_faster_rcnn(model):
    assert cfg.MODEL.FASTER_RCNN
    return build_generic_detection_model(
        model, ResNet.add_ResNet50_conv4_body, ResNet.add_ResNet_roi_conv5_head
    )
def ResNet101_faster_rcnn(model):
    assert cfg.MODEL.FASTER_RCNN
    return build_generic_detection_model(
        model, ResNet.add_ResNet101_conv4_body, ResNet.add_ResNet_roi_conv5_head
    )
def ResNet50_rfcn(model):
    return build_generic_rfcn_model(
        model, ResNet.add_ResNet50_conv5_body, dim_reduce=1024
    )
def ResNet101_rfcn(model):
    return build_generic_rfcn_model(
        model, ResNet.add_ResNet101_conv5_body, dim_reduce=1024
    )
| 35.061765 | 80 | 0.656824 |
7954c5cf95226d8c0b37dfdb0462536c44cdad48 | 1,318 | py | Python | lib/rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py | efajardo/rucio | 460f394715568b937584ef671382b2b93add1758 | [
"Apache-2.0"
] | 1 | 2019-03-04T09:09:42.000Z | 2019-03-04T09:09:42.000Z | lib/rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | 1 | 2021-06-17T14:15:15.000Z | 2021-06-17T14:15:15.000Z | # Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits, <martin.barisits@cern.ch>, 2018
#
# Add index to quarantined replicas
#
# Revision ID: b818052fa670
# Revises: 2962ece31cf4
# Create Date: 2018-03-07 14:45:46.484383
from alembic.op import (create_index, drop_index)
# revision identifiers, used by Alembic.
revision = 'b818052fa670' # pylint: disable=invalid-name
down_revision = '2962ece31cf4' # pylint: disable=invalid-name
def upgrade():
    '''
    Create the unique QUARANTINED_REPLICAS_PATH_IDX index on
    quarantined_replicas(path, rse_id).
    '''
    create_index('QUARANTINED_REPLICAS_PATH_IDX', 'quarantined_replicas', ['path', 'rse_id'], unique=True)
def downgrade():
    '''
    Drop the QUARANTINED_REPLICAS_PATH_IDX index from quarantined_replicas.
    '''
    drop_index('QUARANTINED_REPLICAS_PATH_IDX', 'quarantined_replicas')
| 29.954545 | 106 | 0.738998 |
7954c5ea132e9d1c02047a616998066f446cf1b1 | 24,774 | py | Python | glance_store/openstack/common/log.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | glance_store/openstack/common/log.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | glance_store/openstack/common/log.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from glance_store.openstack.common.gettextutils import _
from glance_store.openstack.common import importutils
from glance_store.openstack.common import jsonutils
from glance_store.openstack.common import local
# NOTE(flaper87): Pls, remove when graduating this module
# from the incubator.
from glance_store.openstack.common.strutils import mask_password # noqa
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# Compatibility shim: use the stdlib NullHandler where available (2.7+),
# otherwise define a do-nothing handler with the same interface.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass
        def emit(self, record):
            pass
        def createLock(self):
            # No lock needed since emit() does nothing.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
    # Basename of the outermost stack frame's file, i.e. the script that
    # started this process.
    return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    Returns None when neither option is set (i.e. log to stdout/stderr).
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile and not logdir:
        return logfile
    if logfile and logdir:
        return os.path.join(logdir, logfile)
    if logdir:
        # Only a directory was given: derive the file name from the binary.
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    return None
class BaseLoggerAdapter(logging.LoggerAdapter):
    def audit(self, msg, *args, **kwargs):
        # logging.AUDIT is the custom level registered at module import time
        # (INFO + 1).
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creation of the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Created (and cached) on first access via the module's getLogger().
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """LoggerAdapter that injects request-context data into log records."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Maps deprecated message -> list of args already logged, so each
        # (message, args) pair is only warned about once.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
        if args in sent_args:
            # Already logged this message, so don't log it again.
            return
        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # Explicit context kwarg wins; otherwise fall back to thread-local.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Snapshot of everything gathered so far, available to formatters
        # under record.extra.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes every log record to a JSON object."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines (newlines stripped)."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process when present.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as critical."""
    def logging_excepthook(exc_type, value, tb):
        extra = {'exc_info': (exc_type, value, tb)}
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging config file fails to parse (see setup())."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
def _load_log_config(log_config_append):
    """Append a fileConfig-style logging config; wrap parse errors.

    :raises: LogConfigError when the file cannot be parsed.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
    """Setup logging.

    Uses CONF.log_config_append when set, otherwise configures handlers
    from the individual CONF options. Also installs an excepthook that
    logs uncaught exceptions.
    """
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    # Allow consuming projects to override the default context format string.
    cfg.set_defaults(
        log_opts, logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    :raises: TypeError when the configured facility name is unknown.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # list() is required: on Python 3, dict.keys() returns a view with
        # no extend() method, so the original code crashed here.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes messages with APP-NAME (RFC 5424 style)."""

    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Configure the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate: drop any previously attached handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        #   after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # No file and no stderr: fall back to stdout so output isn't lost.
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        # Prefer the incubator handler; fall back to oslo.messaging's.
        try:
            handler = importutils.import_object(
                "glance_store.openstack.common.log_handler.PublishErrorsHandler",
                logging.ERROR)
        except ImportError:
            handler = importutils.import_object(
                "oslo.messaging.notify.log_handler.PublishErrorsHandler",
                logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply the per-module default levels ('module=LEVEL' pairs).
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)
# Cache of name -> ContextAdapter so repeated getLogger calls share adapters.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    # NOTE: version is only honored the first time a given name is seen;
    # later calls return the cached adapter unchanged.
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """File-like adapter that forwards ``write`` calls to a logger.

    Useful for handing a logger to APIs that expect a writable stream.
    """

    def __init__(self, logger, level=logging.INFO):
        """Wrap *logger*, emitting each written message at *level*."""
        self.logger = logger
        self.level = level

    def write(self, msg):
        """Log *msg* (with trailing whitespace removed) at the set level."""
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context
    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # store project info
        record.project = self.project
        record.version = self.version
        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix
        # Formatter internals changed in 3.2: the format string lives in a
        # style object rather than directly in self._fmt.
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as record.color.

    The color is only *attached* to the record here; it is rendered when a
    format string references %(color)s.
    """

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated when deprecations are fatal."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Was super(Exception, self), which starts the MRO lookup past this
        # class; the conventional form keeps subclassing well-behaved.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
| 36.378855 | 81 | 0.609994 |
7954c6b156e9cabb190323fbbf260fce556e24cb | 13,200 | py | Python | Tests/test_edgecase.py | CarlGroth/TagScript | 14f0417a3f9a82b04c28c29570686260048de6ac | [
"CC-BY-4.0"
] | 3 | 2019-09-26T18:23:02.000Z | 2021-05-21T20:16:14.000Z | Tests/test_edgecase.py | CarlGroth/TagScript | 14f0417a3f9a82b04c28c29570686260048de6ac | [
"CC-BY-4.0"
] | null | null | null | Tests/test_edgecase.py | CarlGroth/TagScript | 14f0417a3f9a82b04c28c29570686260048de6ac | [
"CC-BY-4.0"
] | 2 | 2020-06-19T11:19:42.000Z | 2021-09-27T05:10:11.000Z | from ..TagScriptEngine import Verb, Interpreter, adapter, block, interface, WorkloadExceededError
import unittest
# Required third party blocks.
class ReplaceBlock(interface.Block):
    """``{replace(before,after):payload}`` - substring replacement block."""

    def will_accept(self, ctx : Interpreter.Context):
        """Accept only verbs declared as ``replace``."""
        return ctx.verb.declaration.lower() == "replace"

    def process(self, ctx : Interpreter.Context):
        """Replace every occurrence of *before* with *after* in the payload.

        The parameter is split on the first comma only, so *after* may itself
        contain commas.
        """
        if ctx.verb.parameter is None:
            # NOTE: message wording kept as-is for backward compatibility.
            return "TS Error: No join character supplied"
        try:
            before, after = ctx.verb.parameter.split(",", maxsplit=1)
        except ValueError:
            # No comma present - a (before, after) pair cannot be formed.
            # (Narrowed from a bare `except:` which swallowed everything.)
            return "TS Error: Supply a before and after string"
        return ctx.verb.payload.replace(before, after)
class PythonBlock(interface.Block):
    """Implements the ``contains``, ``in`` and ``index`` verbs."""

    def will_accept(self, ctx: Interpreter.Context):
        """Accept only the three supported declarations."""
        return ctx.verb.declaration.lower() in ('contains', 'in', 'index')

    def process(self, ctx: Interpreter.Context):
        """Dispatch to the requested membership or index operation."""
        dec = ctx.verb.declaration.lower()
        param = ctx.verb.parameter
        payload = ctx.verb.payload
        if dec == "contains":
            # Whole-word membership: "true"/"false".
            return str(param in payload.split()).lower()
        if dec == "in":
            # Raw substring membership: "true"/"false".
            return str(param in payload).lower()
        # dec == "index": word position within the payload, or -1 when absent.
        try:
            return str(payload.strip().split().index(param))
        except ValueError:
            return "-1"
class TestEdgeCases(unittest.TestCase):
    def setUp(self):
        """Build an interpreter loaded with every block these tests rely on."""
        engine_blocks = [
            # Stock engine blocks.
            block.MathBlock(),
            block.RandomBlock(),
            block.RangeBlock(),
            block.AnyBlock(),
            block.IfBlock(),
            block.AllBlock(),
            block.BreakBlock(),
            block.StrfBlock(),
            block.StopBlock(),
            block.AssignmentBlock(),
            block.FiftyFiftyBlock(),
            block.ShortCutRedirectBlock("message"),
            block.LooseVariableGetterBlock(),
            block.SubstringBlock(),
            # Third-party blocks defined above in this module.
            PythonBlock(),
            ReplaceBlock(),
        ]
        self.blocks = engine_blocks
        self.engine = Interpreter(self.blocks)
def tearDown(self):
self.blocks = None
self.engine = None
    def test_specific_duplication(self):
        """Regression: a pathological user-submitted de-obfuscation tag must
        still interpret to a short nickname rather than blowing up in size."""
        # User submitted tag that messes things up.
        script = """
        {=(cancer1):𝓪 𝓫 𝓬 𝓭 𝓮 𝓯 𝓰 𝓱 𝓲 𝓳 𝓴 𝓵 𝓶 𝓷 𝓸 𝓹 𝓺 𝓻 𝓼 𝓽 𝓾 𝓿 𝔀 𝔁 𝔂 𝔃}
        {=(cancer2):𝕒 𝕓 𝕔 𝕕 𝕖 𝕗 𝕘 𝕙 𝕚 𝕛 𝕜 𝕝 𝕞 𝕟 𝕠 𝕡 𝕢 𝕣 𝕤 𝕥 𝕦 𝕧 𝕨 𝕩 𝕪 𝕫}
        {=(cancer3):a b c d e f g h i j k l m n o p q r s t u v w x y z}
        {=(cancer4):ⓐ ⓑ ⓒ ⓓ ⓔ ⓕ ⓖ ⓗ ⓘ ⓙ ⓚ ⓛ ⓜ ⓝ ⓞ ⓟ ⓠ ⓡ ⓢ ⓣ ⓤ ⓥ ⓦ ⓧ ⓨ ⓩ}
        {=(cancer5):🅐 🅑 🅒 🅓 🅔 🅕 🅖 🅗 🅘 🅙 🅚 🅛 🅜 🅝 🅞 🅟 🅠 🅡 🅢 🅣 🅤 🅥 🅦 🅧 🅨 🅩}
        {=(cancer6):𝐚 𝐛 𝐜 𝐝 𝐞 𝐟 𝐠 𝐡 𝐢 𝐣 𝐤 𝐥 𝐦 𝐧 𝐨 𝐩 𝐪 𝐫 𝐬 𝐭 𝐮 𝐯 𝐰 𝐱 𝐲 𝐳}
        {=(cancer7):𝖆 𝖇 𝖈 𝖉 𝖊 𝖋 𝖌 𝖍 𝖎 𝖏 𝖐 𝖑 𝖒 𝖓 𝖔 𝖕 𝖖 𝖗 𝖘 𝖙 𝖚 𝖛 𝖜 𝖝 𝖞 𝖟}
        {=(cancer8):𝒂 𝒃 𝒄 𝒅 𝒆 𝒇 𝒈 𝒉 𝒊 𝒋 𝒌 𝒍 𝒎 𝒏 𝒐 𝒑 𝒒 𝒓 𝒔 𝒕 𝒖 𝒗 𝒘 𝒙 𝒚 𝒛}
        {=(cancer9):𝚊 𝚋 𝚌 𝚍 𝚎 𝚏 𝚐 𝚑 𝚒 𝚓 𝚔 𝚕 𝚖 𝚗 𝚘 𝚙 𝚚 𝚛 𝚜 𝚝 𝚞 𝚟 𝚠 𝚡 𝚢 𝚣}
        {=(cancer10):𝖺 𝖻 𝖼 𝖽 𝖾 𝖿 𝗀 𝗁 𝗂 𝗃 𝗄 𝗅 𝗆 𝗇 𝗈 𝗉 𝗊 𝗋 𝗌 𝗍 𝗎 𝗏 𝗐 𝗑 𝗒 𝗓}
        {=(cancer11):𝗮 𝗯 𝗰 𝗱 𝗲 𝗳 𝗴 𝗵 𝗶 𝗷 𝗸 𝗹 𝗺 𝗻 𝗼 𝗽 𝗾 𝗿 𝘀 𝘁 𝘂 𝘃 𝘄 𝘅 𝘆 𝘇}
        {=(cancer12):𝙖 𝙗 𝙘 𝙙 𝙚 𝙛 𝙜 𝙝 𝙞 𝙟 𝙠 𝙡 𝙢 𝙣 𝙤 𝙥 𝙦 𝙧 𝙨 𝙩 𝙪 𝙫 𝙬 𝙭 𝙮 𝙯}
        {=(cancer13):𝘢 𝘣 𝘤 𝘥 𝘦 𝘧 𝘨 𝘩 𝘪 𝘫 𝘬 𝘭 𝘮 𝘯 𝘰 𝘱 𝘲 𝘳 𝘴 𝘵 𝘶 𝘷 𝘸 𝘹 𝘺 𝘻}
        {=(cancer14):⒜ ⒝ ⒞ ⒟ ⒠ ⒡ ⒢ ⒣ ⒤ ⒥ ⒦ ⒧ ⒨ ⒩ ⒪ ⒫ ⒬ ⒭ ⒮ ⒯ ⒰ ⒱ ⒲ ⒳ ⒴ ⒵}
        {=(cancer15):á b ć d é f ǵ h í j ḱ ĺ ḿ ń ő ṕ q ŕ ś t ú v ẃ x ӳ ź}
        {=(cancer16):ค ๒ ƈ ɗ ﻉ ि ﻭ ɦ ٱ ﻝ ᛕ ɭ ๓ ก ѻ ρ ۹ ɼ ร Շ પ ۷ ฝ ซ ץ չ}
        {=(cancer17):α в ¢ ∂ є ƒ ﻭ н ι נ к ℓ м η σ ρ ۹ я ѕ т υ ν ω χ у չ}
        {=(cancer18):ค ๒ ς ๔ є Ŧ ﻮ ђ เ ן к ɭ ๓ ภ ๏ ק ợ г ร Շ ย ש ฬ א ץ չ}
        {=(cancer19):а ъ с ↁ э f Б Ђ і ј к l м и о р q ѓ ѕ т ц v ш х Ў z}
        {=(cancer20):ል ጌ ር ዕ ቿ ቻ ኗ ዘ ጎ ጋ ጕ ረ ጠ ክ ዐ የ ዒ ዪ ነ ፕ ሁ ሀ ሠ ሸ ሃ ጊ}
        {=(cancer21):𝔞 𝔟 𝔠 𝔡 𝔢 𝔣 𝔤 𝔥 𝔦 𝔧 𝔨 𝔩 𝔪 𝔫 𝔬 𝔭 𝔮 𝔯 𝔰 𝔱 𝔲 𝔳 𝔴 𝔵 𝔶 𝔷}
        {=(cancer22):ä ḅ ċ ḋ ë ḟ ġ ḧ ï j ḳ ḷ ṁ ṅ ö ṗ q ṛ ṡ ẗ ü ṿ ẅ ẍ ÿ ż}
        {=(cancer23):Ⱥ ƀ ȼ đ ɇ f ǥ ħ ɨ ɉ ꝁ ł m n ø ᵽ ꝗ ɍ s ŧ ᵾ v w x ɏ ƶ}
        {=(uppercasesplit):comment variable}
        {=(cancer24):𝓐 𝓑 𝓒 𝓓 𝓔 𝓕 𝓖 𝓗 𝓘 𝓙 𝓚 𝓛 𝓜 𝓝 𝓞 𝓟 𝓠 𝓡 𝓢 𝓣 𝓤 𝓥 𝓦 𝓧 𝓨 𝓩}
        {=(cancer25):𝔸 𝔹 ℂ 𝔻 𝔼 𝔽 𝔾 ℍ 𝕀 𝕁 𝕂 𝕃 𝕄 ℕ 𝕆 ℙ ℚ ℝ 𝕊 𝕋 𝕌 𝕍 𝕎 𝕏 𝕐 ℤ}
        {=(cancer26):Ⓐ Ⓑ Ⓒ Ⓓ Ⓔ Ⓕ Ⓖ Ⓗ Ⓘ Ⓙ Ⓚ Ⓛ Ⓜ Ⓝ Ⓞ Ⓟ Ⓠ Ⓡ Ⓢ Ⓣ Ⓤ Ⓥ Ⓦ Ⓧ Ⓨ Ⓩ}
        {=(cancer27):🅐 🅑 🅒 🅓 🅔 🅕 🅖 🅗 🅘 🅙 🅚 🅛 🅜 🅝 🅞 🅟 🅠 🅡 🅢 🅣 🅤 🅥 🅦 🅧 🅨 🅩}
        {=(cancer28):A B C D E F G H I J K L M N O P Q R S T U V W X Y Z}
        {=(cancer29):𝐀 𝐁 𝐂 𝐃 𝐄 𝐅 𝐆 𝐇 𝐈 𝐉 𝐊 𝐋 𝐌 𝐍 𝐎 𝐏 𝐐 𝐑 𝐒 𝐓 𝐔 𝐕 𝐖 𝐗 𝐘 𝐙}
        {=(cancer30):𝕬 𝕭 𝕮 𝕯 𝕰 𝕱 𝕲 𝕳 𝕴 𝕵 𝕶 𝕷 𝕸 𝕹 𝕺 𝕻 𝕼 𝕽 𝕾 𝕿 𝖀 𝖁 𝖂 𝖃 𝖄 𝖅}
        {=(cancer31):𝑨 𝑩 𝑪 𝑫 𝑬 𝑭 𝑮 𝑯 𝑰 𝑱 𝑲 𝑳 𝑴 𝑵 𝑶 𝑷 𝑸 𝑹 𝑺 𝑻 𝑼 𝑽 𝑾 𝑿 𝒀 𝒁}
        {=(cancer32):𝖠 𝖡 𝖢 𝖣 𝖤 𝖥 𝖦 𝖧 𝖨 𝖩 𝖪 𝖫 𝖬 𝖭 𝖮 𝖯 𝖰 𝖱 𝖲 𝖳 𝖴 𝖵 𝖶 𝖷 𝖸 𝖹}
        {=(cancer33):𝙰 𝙱 𝙲 𝙳 𝙴 𝙵 𝙶 𝙷 𝙸 𝙹 𝙺 𝙻 𝙼 𝙽 𝙾 𝙿 𝚀 𝚁 𝚂 𝚃 𝚄 𝚅 𝚆 𝚇 𝚈 𝚉}
        {=(cancer34):𝗔 𝗕 𝗖 𝗗 𝗘 𝗙 𝗚 𝗛 𝗜 𝗝 𝗞 𝗟 𝗠 𝗡 𝗢 𝗣 𝗤 𝗥 𝗦 𝗧 𝗨 𝗩 𝗪 𝗫 𝗬 𝗭}
        {=(cancer35):𝘼 𝘽 𝘾 𝘿 𝙀 𝙁 𝙂 𝙃 𝙄 𝙅 𝙆 𝙇 𝙈 𝙉 𝙊 𝙋 𝙌 𝙍 𝙎 𝙏 𝙐 𝙑 𝙒 𝙓 𝙔 𝙕}
        {=(cancer36):𝘈 𝘉 𝘊 𝘋 𝘌 𝘍 𝘎 𝘏 𝘐 𝘑 𝘒 𝘓 𝘔 𝘕 𝘖 𝘗 𝘘 𝘙 𝘚 𝘛 𝘜 𝘝 𝘞 𝘟 𝘠 𝘡}
        {=(cancer37):🇦 🇧 🇨 🇩 🇪 🇫 🇬 🇭 🇮 🇯 🇰 🇱 🇲 🇳 🇴 🇵 🇶 🇷 🇸 🇹 🇺 🇻 🇼 🇽 🇾 🇿}
        {=(cancer38):🄰 🄱 🄲 🄳 🄴 🄵 🄶 🄷 🄸 🄹 🄺 🄻 🄼 🄽 🄾 🄿 🅀 🅁 🅂 🅃 🅄 🅅 🅆 🅇 🅈 🅉}
        {=(cancer39):🅰 🅱 🅲 🅳 🅴 🅵 🅶 🅷 🅸 🅹 🅺 🅻 🅼 🅽 🅾 🅿 🆀 🆁 🆂 🆃 🆄 🆅 🆆 🆇 🆈 🆉}
        {=(cancer40):Á B Ć D É F Ǵ H í J Ḱ Ĺ Ḿ Ń Ő Ṕ Q Ŕ ś T Ű V Ẃ X Ӳ Ź}
        {=(cancer41):Д Б Ҁ ↁ Є F Б Н І Ј Ќ L М И Ф Р Q Я Ѕ Г Ц V Щ Ж Ч Z}
        {=(cancer42):𝔄 𝔅 ℭ 𝔇 𝔈 𝔉 𝔊 ℌ ℑ 𝔍 𝔎 𝔏 𝔐 𝔑 𝔒 𝔓 𝔔 ℜ 𝔖 𝔗 𝔘 𝔙 𝔚 𝔛 𝔜 ℨ}
        {=(cancer43):Ä Ḅ Ċ Ḋ Ё Ḟ Ġ Ḧ Ї J Ḳ Ḷ Ṁ Ṅ Ö Ṗ Q Ṛ Ṡ Ṫ Ü Ṿ Ẅ Ẍ Ÿ Ż}
        {=(cancer44):Ⱥ Ƀ Ȼ Đ Ɇ F Ǥ Ħ Ɨ Ɉ Ꝁ Ł M N Ø Ᵽ Ꝗ Ɍ S Ŧ ᵾ V W X Ɏ Ƶ}
        {=(cancer45):ᴀ ʙ ᴄ ᴅ ᴇ ғ ɢ ʜ ɪ ᴊ ᴋ ʟ ᴍ ɴ ᴏ ᴘ ǫ ʀ s ᴛ ᴜ ᴠ ᴡ x ʏ ᴢ}
        {=(cancer):{cancer1} {cancer2} {cancer3} {cancer4} {cancer5} {cancer6} {cancer7} {cancer8} {cancer9} {cancer10} {cancer11} {cancer12} {cancer13} {cancer14} {cancer15} {cancer16} {cancer17} {cancer18} {cancer19} {cancer20} {cancer21} {cancer22} {cancer23} {cancer24} {cancer25} {cancer26} {cancer27} {cancer28} {cancer29} {cancer30} {cancer31} {cancer32} {cancer33} {cancer34} {cancer35} {cancer36} {cancer37} {cancer38} {cancer39} {cancer40} {cancer41} {cancer42} {cancer43} {cancer44} {cancer45}}
        {=(referencemap):a b c d e f g h i j k l m n o p q r s t u v w x y z}
        {=(username):{replace(, ):{target}}}
        {=(username):{if({contains({username(2)}):{cancer}}==true):{replace({username(2)},{{if({m:trunc({index({username(2)}):{cancer}}+1)}>598):upper|lower}:{referencemap({m:trunc(({index({username(2)}):{cancer}}+1)%26)})}}):{username}}|{username}}}
        {=(username):{if({contains({username(3)}):{cancer}}==true):{replace({username(3)},{referencemap({m:trunc(({index({username(3)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(4)}):{cancer}}==true):{replace({username(4)},{referencemap({m:trunc(({index({username(4)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(5)}):{cancer}}==true):{replace({username(5)},{referencemap({m:trunc(({index({username(5)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(6)}):{cancer}}==true):{replace({username(6)},{referencemap({m:trunc(({index({username(6)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(7)}):{cancer}}==true):{replace({username(7)},{referencemap({m:trunc(({index({username(7)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(8)}):{cancer}}==true):{replace({username(8)},{referencemap({m:trunc(({index({username(8)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(9)}):{cancer}}==true):{replace({username(9)},{referencemap({m:trunc(({index({username(9)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(10)}):{cancer}}==true):{replace({username(10)},{referencemap({m:trunc(({index({username(10)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(11)}):{cancer}}==true):{replace({username(11)},{referencemap({m:trunc(({index({username(11)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(12)}):{cancer}}==true):{replace({username(12)},{referencemap({m:trunc(({index({username(12)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(13)}):{cancer}}==true):{replace({username(13)},{referencemap({m:trunc(({index({username(13)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(14)}):{cancer}}==true):{replace({username(14)},{referencemap({m:trunc(({index({username(14)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(15)}):{cancer}}==true):{replace({username(15)},{referencemap({m:trunc(({index({username(15)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(16)}):{cancer}}==true):{replace({username(16)},{referencemap({m:trunc(({index({username(16)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(17)}):{cancer}}==true):{replace({username(17)},{referencemap({m:trunc(({index({username(17)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(18)}):{cancer}}==true):{replace({username(18)},{referencemap({m:trunc(({index({username(18)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(19)}):{cancer}}==true):{replace({username(19)},{referencemap({m:trunc(({index({username(19)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(20)}):{cancer}}==true):{replace({username(20)},{referencemap({m:trunc(({index({username(20)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(21)}):{cancer}}==true):{replace({username(21)},{referencemap({m:trunc(({index({username(21)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(22)}):{cancer}}==true):{replace({username(22)},{referencemap({m:trunc(({index({username(22)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(23)}):{cancer}}==true):{replace({username(23)},{referencemap({m:trunc(({index({username(23)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(24)}):{cancer}}==true):{replace({username(24)},{referencemap({m:trunc(({index({username(24)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(25)}):{cancer}}==true):{replace({username(25)},{referencemap({m:trunc(({index({username(25)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(26)}):{cancer}}==true):{replace({username(26)},{referencemap({m:trunc(({index({username(26)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(27)}):{cancer}}==true):{replace({username(27)},{referencemap({m:trunc(({index({username(27)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(28)}):{cancer}}==true):{replace({username(28)},{referencemap({m:trunc(({index({username(28)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(29)}):{cancer}}==true):{replace({username(29)},{referencemap({m:trunc(({index({username(29)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(30)}):{cancer}}==true):{replace({username(30)},{referencemap({m:trunc(({index({username(30)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(username):{if({contains({username(31)}):{cancer}}==true):{replace({username(31)},{referencemap({m:trunc(({index({username(31)}):{cancer}}+1)%26)})}):{username}}|{username}}}
        {=(error):You can't change your own nickname with Carlbot. Please mention somebody after the tag invocation.}
        {c:{if({target(id)}=={user(id)}):choose {error},{error}|setnick {target(id)} {join():{username}}}}
        """
        data = {
            "target":adapter.StringAdapter("Basic Username")
        }
        result = self.engine.process(script, data).body
        print(result)
        # The interpreted output must stay within a sane nickname length.
        self.assertTrue(len(result) < 150)
    def test_recursion(self):
        """Exponential self-expansion of a variable must be aborted by the
        interpreter with WorkloadExceededError instead of consuming memory."""
        script = """
        {=(recursion):lol}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {=(recursion):{recursion}{recursion}}
        {recursion}
        """
        data = {
            "target":adapter.StringAdapter("Basic Username")
        }
        # 28 doublings of the payload vastly exceed the 2000-char budget.
        with self.assertRaises(WorkloadExceededError):
            self.engine.process(script, data, 2000)
7954c6cfb28b70ed897cc19c86f20fb3b08bb9e5 | 6,389 | py | Python | ducky/devices/keyboard.py | happz/ducky | 1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad | [
"MIT"
] | 3 | 2015-04-25T18:25:37.000Z | 2017-08-31T20:52:29.000Z | ducky/devices/keyboard.py | happz/ducky-legacy | 1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad | [
"MIT"
] | 27 | 2015-01-06T21:59:22.000Z | 2016-11-12T07:31:39.000Z | ducky/devices/keyboard.py | happz/ducky-legacy | 1c6a875ca5a7a9cc71836bad5b7e45cc398d42ad | [
"MIT"
] | 1 | 2017-05-14T18:52:34.000Z | 2017-05-14T18:52:34.000Z | """
Keyboard controller - provides events for pressed and released keys.
"""
import enum
import io
from collections import deque
from . import DeviceFrontend, DeviceBackend, MMIOMemoryPage
from ..errors import InvalidResourceError
from ..mm import UINT8_FMT, addr_to_page, UINT32_FMT, u32_t
from ..hdt import HDTEntry_Device
# Defaults used when the machine config file does not override them.
DEFAULT_IRQ = 0x01             # IRQ line assigned to the keyboard controller
DEFAULT_MMIO_ADDRESS = 0x8000  # base address of the controller's MMIO page
class KeyboardPorts(enum.IntEnum):
  """Register offsets within the keyboard controller's MMIO page."""

  STATUS = 0x00  # status register - reads always yield 0x00 (see KeyboardMMIOMemoryPage)
  DATA   = 0x01  # data register - a read pops one key byte, 0xFF when empty
  LAST   = 0x01  # highest used offset; IntEnum makes this an alias of DATA
class HDTEntry_Keyboard(HDTEntry_Device):
  """HDT entry advertising the keyboard controller to guest software."""

  _fields_ = HDTEntry_Device.ENTRY_HEADER + [
    ('mmio_address', u32_t)  # base address of the controller's MMIO page
  ]

  def __init__(self, logger, config, section):
    """Read the MMIO address for this device from its config section."""
    super(HDTEntry_Keyboard, self).__init__(logger, section, 'Virtual keyboard controller')

    # Fall back to the module-level default when 'mmio-address' is omitted.
    self.mmio_address = config.getint(section, 'mmio-address', DEFAULT_MMIO_ADDRESS)

    logger.debug('%s: mmio-address=%s', self.__class__.__name__, UINT32_FMT(self.mmio_address))
class KeyboardMMIOMemoryPage(MMIOMemoryPage):
  """MMIO page exposing the keyboard controller's registers to the guest."""

  def read_u8(self, offset):
    """Read one byte from a controller register.

    DATA pops the next pending key byte (0xFF when no input is queued);
    STATUS always reads as 0x00; any other offset is logged and reads 0x00.
    """
    self.DEBUG('%s.read_u8: offset=%s', self.__class__.__name__, UINT8_FMT(offset))

    if offset == KeyboardPorts.DATA:
      key = self._device._read_char()

      if key is None:
        self.DEBUG('%s.get: empty input, signal it downstream', self.__class__.__name__)
        return 0xFF

      self.DEBUG('%s.get: input byte is %i', self.__class__.__name__, key)
      return key

    if offset != KeyboardPorts.STATUS:
      self.WARN('%s.read_u8: attempt to read raw offset: offset=%s', self.__class__.__name__, UINT8_FMT(offset))

    return 0x00
class ControlMessages(enum.IntEnum):
  """Out-of-band control values mixed into the stream of raw key bytes."""

  HALT = 1025  # every input stream is exhausted - ask the backend to halt the machine

# NOTE(review): presumably values at or above this are control messages while
# lower values are raw key bytes - verify against producers of the queue.
CONTROL_MESSAGE_FIRST = 1024
class Frontend(DeviceFrontend):
  """Host-side half of the keyboard controller.

  Watches host input streams through the machine's reactor and forwards the
  raw bytes it reads - plus control messages such as ``HALT`` - to the
  backend device via the communication channel.
  """

  def __init__(self, machine, name):
    super(Frontend, self).__init__(machine, 'input', name)

    self._comm_queue = machine.comm_channel.get_queue(name)
    self._streams = []          # streams queued via enqueue_stream, FIFO
    self._stream = None         # stream currently registered with the reactor
    self.backend = machine.get_device_by_name(name)

  @staticmethod
  def create_from_config(machine, config, section):
    """Create a frontend bound to the backend named by the ``slave`` option."""
    slave = config.get(section, 'slave', default = section)
    return Frontend(machine, slave)

  def boot(self):
    super(Frontend, self).boot()
    self._open_input()
    self.backend.boot()

  def halt(self):
    self._close_input()
    self.backend.halt()
    super(Frontend, self).halt()

  def enqueue_stream(self, stream):
    """Queue another input stream; streams are consumed in FIFO order."""
    self.machine.DEBUG('%s.enqueue_input: stream=%s', self.__class__.__name__, stream)

    if not stream.has_poll_support():
      raise InvalidResourceError('Keyboard stream must support polling')

    self._streams.append(stream)

  def _close_input(self):
    """Detach the current stream from the reactor, if one is open."""
    self.machine.DEBUG('%s._close_input: input=%s', self.__class__.__name__, self._stream)

    if self._stream is None:
      return

    self._stream.unregister_with_reactor(self.machine.reactor)
    self._stream = None

  def _open_input(self):
    """Switch to the next queued stream, or signal HALT when none remain."""
    self.machine.DEBUG('%s._open_input', self.__class__.__name__)

    self._close_input()

    if not self._streams:
      self.machine.DEBUG('%s._open_input: no additional input streams', self.__class__.__name__)

      # No more input can ever arrive - tell the backend to halt the machine.
      self._comm_queue.write_in(ControlMessages.HALT)
      return

    self._stream = self._streams.pop(0)

    self.machine.DEBUG('%s._open_input: stream=%r', self.__class__.__name__, self._stream)

    self._stream.register_with_reactor(self.machine.reactor, on_read = self._handle_raw_input, on_error = self._handle_input_error)

  def _handle_input_error(self):
    """On stream error, drop the failed stream and advance to the next one."""
    # Fix: the original DEBUG call left its '%s' placeholder unfilled.
    self.machine.DEBUG('%s._handle_input_error', self.__class__.__name__)

    self._open_input()

  def _handle_raw_input(self):
    """Reactor callback: read available bytes and forward them downstream."""
    self.machine.DEBUG('%s._handle_raw_input', self.__class__.__name__)

    # Fix: `is not False` was vacuously true - the attribute is either None
    # or an open stream, so the meaningful invariant is `is not None`.
    assert self._stream is not None

    buff = self._stream.read(size = io.DEFAULT_BUFFER_SIZE)
    self.machine.DEBUG('%s._handle_raw_input: buff=%s (%s)', self.__class__.__name__, buff, type(buff))

    if buff is None:
      self.machine.DEBUG('%s._handle_raw_input: nothing to do, no input', self.__class__.__name__)
      return

    if not buff:
      # Empty read means EOF on this stream - move on to the next one.
      self._open_input()
      return

    self.machine.DEBUG('%s._handle_raw_input: adding %i chars', self.__class__.__name__, len(buff))
    self._comm_queue.write_in(buff)
    self.machine.trigger_irq(self.backend)
class Backend(DeviceBackend):
  """Machine-side half of the keyboard controller.

  Drains bytes and control messages the Frontend pushed onto the
  communication channel, buffers them in a local queue, and hands them to
  the simulated CPU one at a time through its MMIO page.
  """

  def __init__(self, machine, name, mmio_address = None, irq = None):
    super(Backend, self).__init__(machine, 'input', name)

    self._mmio_address = mmio_address or DEFAULT_MMIO_ADDRESS  # base of the MMIO register page
    self._mmio_page = None  # created on boot(), torn down on halt()
    self.irq = irq or DEFAULT_IRQ  # IRQ line assigned to this controller
    self._comm_queue = machine.comm_channel.create_queue(name)
    self._key_queue = deque()  # pending key bytes / ControlMessages, FIFO

  @staticmethod
  def create_from_config(machine, config, section):
    """Build a backend from a machine config section."""
    return Backend(machine, section,
                   mmio_address = config.getint(section, 'mmio-address', DEFAULT_MMIO_ADDRESS),
                   irq = config.getint(section, 'irq', DEFAULT_IRQ))

  @staticmethod
  def create_hdt_entries(logger, config, section):
    """Return the HDT entries describing this device to guest software."""
    return [HDTEntry_Keyboard(logger, config, section)]

  def __repr__(self):
    return 'basic keyboard controller on [%s] as %s' % (UINT32_FMT(self._mmio_address), self.name)

  def boot(self):
    """Register the controller's MMIO page with machine memory."""
    self.machine.DEBUG('%s.boot', self.__class__.__name__)

    self._mmio_page = KeyboardMMIOMemoryPage(self, self.machine.memory, addr_to_page(self._mmio_address))
    self.machine.memory.register_page(self._mmio_page)

    self.machine.tenh('hid: %s', self)

  def halt(self):
    """Unregister the MMIO page."""
    self.machine.DEBUG('%s.halt', self.__class__.__name__)

    self.machine.memory.unregister_page(self._mmio_page)

  def _process_input_events(self):
    """Drain the communication channel into the local key queue.

    Byte containers are flattened into individual key bytes; ControlMessages
    are queued as-is; anything else is a programming error.
    """
    self.machine.DEBUG('%s.__process_input_events', self.__class__.__name__)

    while True:
      e = self._comm_queue.read_in()
      if e is None:
        return

      if isinstance(e, (list, bytearray, bytes)):
        for key in e:
          self._key_queue.append(key)

      elif isinstance(e, ControlMessages):
        self._key_queue.append(e)

      else:
        raise InvalidResourceError('Unknown message: e=%s, type=%s' % (e, type(e)))

  def _read_char(self):
    """Pop the next key byte, or None when the queue is empty.

    A HALT control message halts the machine instead of being returned.
    """
    q = self._key_queue

    if not q:
      # Local buffer exhausted - try to refill from the channel.
      self._process_input_events()

    if not q:
      return None

    b = q.popleft()

    if b == ControlMessages.HALT:
      self.machine.halt()
      return None

    return b
| 28.522321 | 131 | 0.702301 |
7954c7d42d1b96cfa7333e265176f992cd814610 | 2,909 | py | Python | fastestimator/util/data.py | TortoiseHam/fastestimator | 97b9fe134a8b5cc3cf21e84c782d1149eecfa3cc | [
"Apache-2.0"
] | 1 | 2019-10-03T00:40:12.000Z | 2019-10-03T00:40:12.000Z | fastestimator/util/data.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | null | null | null | fastestimator/util/data.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, ChainMap, Dict, List, MutableMapping, Optional
class Data(ChainMap[str, Any]):
    """A class which contains prediction and batch data.

    This class is intentionally not @traceable.

    Data objects can be interacted with as if they are regular dictionaries. They are however, actually a combination of
    two dictionaries, a dictionary for trace communication and a dictionary of prediction+batch data. In general, data
    written into the trace dictionary will be logged by the system, whereas data in the pred+batch dictionary will not.
    We therefore provide helper methods to write entries into `Data` which are intended or not intended for logging.

    ```python
    d = fe.util.Data({"a":0, "b":1, "c":2})
    a = d["a"]  # 0
    d.write_with_log("d", 3)
    d.write_without_log("e", 5)
    d.write_with_log("a", 4)
    a = d["a"]  # 4
    r = d.read_logs()  # {"d":3, "a":4}
    ```

    Args:
        batch_data: The batch data dictionary. In practice this is itself often a ChainMap containing separate
            prediction and batch dictionaries.
    """
    maps: List[MutableMapping[str, Any]]

    def __init__(self, batch_data: Optional[MutableMapping[str, Any]] = None) -> None:
        # Explicit None check rather than `batch_data or {}`: a caller-provided *empty* mapping must be kept as the
        # live second map so that later external writes to it stay visible through this Data instance.
        super().__init__({}, {} if batch_data is None else batch_data)

    def write_with_log(self, key: str, value: Any) -> None:
        """Write a given `value` into the `Data` dictionary with the intent that it be logged.

        Args:
            key: The key to associate with the new entry.
            value: The new entry to be written.
        """
        self.__setitem__(key, value)  # ChainMap writes land in maps[0], the logging dictionary

    def write_without_log(self, key: str, value: Any) -> None:
        """Write a given `value` into the `Data` dictionary with the intent that it not be logged.

        Args:
            key: The key to associate with the new entry.
            value: The new entry to be written.
        """
        self.maps[1][key] = value

    def read_logs(self) -> Dict[str, Any]:
        """Read all values from the `Data` dictionary which were intended to be logged.

        Returns:
            A dictionary of all of the keys and values to be logged.
        """
        return self.maps[0]
| 40.402778 | 120 | 0.648677 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.