body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
7878dd6ab4fe97249a29ae73bec00d897639037f7d20aa9835faf5d052a426cc
|
@distributed_trace
def create_or_update(self, resource_group_name: str, environment_name: str, component_name: str, dapr_component_envelope: '_models.DaprComponent', **kwargs: Any) -> '_models.DaprComponent':
'Creates or updates a Dapr Component.\n\n Creates or updates a Dapr Component in a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :param dapr_component_envelope: Configuration details of the Dapr Component.\n :type dapr_component_envelope: ~azure.mgmt.appcontainers.models.DaprComponent\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprComponent, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprComponent\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(dapr_component_envelope, 'DaprComponent')
request = build_create_or_update_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprComponent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
Creates or updates a Dapr Component.
Creates or updates a Dapr Component in a Managed Environment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:param dapr_component_envelope: Configuration details of the Dapr Component.
:type dapr_component_envelope: ~azure.mgmt.appcontainers.models.DaprComponent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DaprComponent, or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.DaprComponent
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_dapr_components_operations.py
|
create_or_update
|
AikoBB/azure-sdk-for-python
| 1
|
python
|
@distributed_trace
def create_or_update(self, resource_group_name: str, environment_name: str, component_name: str, dapr_component_envelope: '_models.DaprComponent', **kwargs: Any) -> '_models.DaprComponent':
'Creates or updates a Dapr Component.\n\n Creates or updates a Dapr Component in a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :param dapr_component_envelope: Configuration details of the Dapr Component.\n :type dapr_component_envelope: ~azure.mgmt.appcontainers.models.DaprComponent\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprComponent, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprComponent\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(dapr_component_envelope, 'DaprComponent')
request = build_create_or_update_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprComponent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
@distributed_trace
def create_or_update(self, resource_group_name: str, environment_name: str, component_name: str, dapr_component_envelope: '_models.DaprComponent', **kwargs: Any) -> '_models.DaprComponent':
'Creates or updates a Dapr Component.\n\n Creates or updates a Dapr Component in a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :param dapr_component_envelope: Configuration details of the Dapr Component.\n :type dapr_component_envelope: ~azure.mgmt.appcontainers.models.DaprComponent\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprComponent, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprComponent\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(dapr_component_envelope, 'DaprComponent')
request = build_create_or_update_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprComponent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized<|docstring|>Creates or updates a Dapr Component.
Creates or updates a Dapr Component in a Managed Environment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:param dapr_component_envelope: Configuration details of the Dapr Component.
:type dapr_component_envelope: ~azure.mgmt.appcontainers.models.DaprComponent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DaprComponent, or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.DaprComponent
:raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
|
333f8e6093908d9d5044c16fd194566e1af4d875b1f759af051e9c714cd815d9
|
@distributed_trace
def delete(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> None:
'Delete a Dapr Component.\n\n Delete a Dapr Component from a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_delete_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.delete.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
|
Delete a Dapr Component.
Delete a Dapr Component from a Managed Environment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_dapr_components_operations.py
|
delete
|
AikoBB/azure-sdk-for-python
| 1
|
python
|
@distributed_trace
def delete(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> None:
'Delete a Dapr Component.\n\n Delete a Dapr Component from a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_delete_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.delete.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
|
@distributed_trace
def delete(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> None:
'Delete a Dapr Component.\n\n Delete a Dapr Component from a Managed Environment.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_delete_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.delete.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})<|docstring|>Delete a Dapr Component.
Delete a Dapr Component from a Managed Environment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
|
ca511e2dc2c56605cb1aa292d2cffd5fc8eeb829c35a01532fef02d69d9e6218
|
@distributed_trace
def list_secrets(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> '_models.DaprSecretsCollection':
'List secrets for a dapr component.\n\n List secrets for a dapr component.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprSecretsCollection, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprSecretsCollection\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_list_secrets_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.list_secrets.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprSecretsCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
List secrets for a dapr component.
List secrets for a dapr component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DaprSecretsCollection, or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.DaprSecretsCollection
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_dapr_components_operations.py
|
list_secrets
|
AikoBB/azure-sdk-for-python
| 1
|
python
|
@distributed_trace
def list_secrets(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> '_models.DaprSecretsCollection':
'List secrets for a dapr component.\n\n List secrets for a dapr component.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprSecretsCollection, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprSecretsCollection\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_list_secrets_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.list_secrets.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprSecretsCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
@distributed_trace
def list_secrets(self, resource_group_name: str, environment_name: str, component_name: str, **kwargs: Any) -> '_models.DaprSecretsCollection':
'List secrets for a dapr component.\n\n List secrets for a dapr component.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param environment_name: Name of the Managed Environment.\n :type environment_name: str\n :param component_name: Name of the Dapr Component.\n :type component_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: DaprSecretsCollection, or the result of cls(response)\n :rtype: ~azure.mgmt.appcontainers.models.DaprSecretsCollection\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', '2022-03-01')
request = build_list_secrets_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, environment_name=environment_name, component_name=component_name, api_version=api_version, template_url=self.list_secrets.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DaprSecretsCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized<|docstring|>List secrets for a dapr component.
List secrets for a dapr component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param environment_name: Name of the Managed Environment.
:type environment_name: str
:param component_name: Name of the Dapr Component.
:type component_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DaprSecretsCollection, or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.DaprSecretsCollection
:raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
|
c78b79f07d0b3638eb97b1b4094d843bf569785a75fd44a2ab3981186841a0f7
|
def __init__(self, id=None, customer_id=None, customer_identifier=None, connection_ids=None, status=None, created_at=None):
'BasicReport - a model defined in Swagger'
self._id = None
self._customer_id = None
self._customer_identifier = None
self._connection_ids = None
self._status = None
self._created_at = None
self.discriminator = None
self.id = id
self.customer_id = customer_id
self.customer_identifier = customer_identifier
self.connection_ids = connection_ids
self.status = status
self.created_at = created_at
|
BasicReport - a model defined in Swagger
|
third_party/saltedge/swagger_client/models/basic_report.py
|
__init__
|
ltowarek/budget-supervisor
| 1
|
python
|
def __init__(self, id=None, customer_id=None, customer_identifier=None, connection_ids=None, status=None, created_at=None):
self._id = None
self._customer_id = None
self._customer_identifier = None
self._connection_ids = None
self._status = None
self._created_at = None
self.discriminator = None
self.id = id
self.customer_id = customer_id
self.customer_identifier = customer_identifier
self.connection_ids = connection_ids
self.status = status
self.created_at = created_at
|
def __init__(self, id=None, customer_id=None, customer_identifier=None, connection_ids=None, status=None, created_at=None):
self._id = None
self._customer_id = None
self._customer_identifier = None
self._connection_ids = None
self._status = None
self._created_at = None
self.discriminator = None
self.id = id
self.customer_id = customer_id
self.customer_identifier = customer_identifier
self.connection_ids = connection_ids
self.status = status
self.created_at = created_at<|docstring|>BasicReport - a model defined in Swagger<|endoftext|>
|
c0e14bc2c4214d68a669b62f0b6de6ecb899a40cdb7e1e9fc801e4d7b456c91e
|
@property
def id(self):
"Gets the id of this BasicReport. # noqa: E501\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :return: The id of this BasicReport. # noqa: E501\n :rtype: str\n "
return self._id
|
Gets the id of this BasicReport. # noqa: E501
the `id` of the general report generated based on the customer's data # noqa: E501
:return: The id of this BasicReport. # noqa: E501
:rtype: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
id
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def id(self):
"Gets the id of this BasicReport. # noqa: E501\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :return: The id of this BasicReport. # noqa: E501\n :rtype: str\n "
return self._id
|
@property
def id(self):
"Gets the id of this BasicReport. # noqa: E501\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :return: The id of this BasicReport. # noqa: E501\n :rtype: str\n "
return self._id<|docstring|>Gets the id of this BasicReport. # noqa: E501
the `id` of the general report generated based on the customer's data # noqa: E501
:return: The id of this BasicReport. # noqa: E501
:rtype: str<|endoftext|>
|
aa65ff16c68908f54fabb3d26d4cd1c9aa1b91ec8d2dc4b1f734a7d491077098
|
@id.setter
def id(self, id):
"Sets the id of this BasicReport.\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :param id: The id of this BasicReport. # noqa: E501\n :type: str\n "
if (id is None):
raise ValueError('Invalid value for `id`, must not be `None`')
self._id = id
|
Sets the id of this BasicReport.
the `id` of the general report generated based on the customer's data # noqa: E501
:param id: The id of this BasicReport. # noqa: E501
:type: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
id
|
ltowarek/budget-supervisor
| 1
|
python
|
@id.setter
def id(self, id):
"Sets the id of this BasicReport.\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :param id: The id of this BasicReport. # noqa: E501\n :type: str\n "
if (id is None):
raise ValueError('Invalid value for `id`, must not be `None`')
self._id = id
|
@id.setter
def id(self, id):
"Sets the id of this BasicReport.\n\n the `id` of the general report generated based on the customer's data # noqa: E501\n\n :param id: The id of this BasicReport. # noqa: E501\n :type: str\n "
if (id is None):
raise ValueError('Invalid value for `id`, must not be `None`')
self._id = id<|docstring|>Sets the id of this BasicReport.
the `id` of the general report generated based on the customer's data # noqa: E501
:param id: The id of this BasicReport. # noqa: E501
:type: str<|endoftext|>
|
709840bfbccf8381a5106229c9f7d6b8905fcfaf2cd0826d63ab33ac1f00780f
|
@property
def customer_id(self):
'Gets the customer_id of this BasicReport. # noqa: E501\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :return: The customer_id of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_id
|
Gets the customer_id of this BasicReport. # noqa: E501
the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
:return: The customer_id of this BasicReport. # noqa: E501
:rtype: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
customer_id
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def customer_id(self):
'Gets the customer_id of this BasicReport. # noqa: E501\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :return: The customer_id of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_id
|
@property
def customer_id(self):
'Gets the customer_id of this BasicReport. # noqa: E501\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :return: The customer_id of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_id<|docstring|>Gets the customer_id of this BasicReport. # noqa: E501
the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
:return: The customer_id of this BasicReport. # noqa: E501
:rtype: str<|endoftext|>
|
df962bdedc8997d40697ec251b6e6daa7e2bb868924dbece77fe9dbfe7ce0fb4
|
@customer_id.setter
def customer_id(self, customer_id):
'Sets the customer_id of this BasicReport.\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :param customer_id: The customer_id of this BasicReport. # noqa: E501\n :type: str\n '
if (customer_id is None):
raise ValueError('Invalid value for `customer_id`, must not be `None`')
self._customer_id = customer_id
|
Sets the customer_id of this BasicReport.
the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
:param customer_id: The customer_id of this BasicReport. # noqa: E501
:type: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
customer_id
|
ltowarek/budget-supervisor
| 1
|
python
|
@customer_id.setter
def customer_id(self, customer_id):
'Sets the customer_id of this BasicReport.\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :param customer_id: The customer_id of this BasicReport. # noqa: E501\n :type: str\n '
if (customer_id is None):
raise ValueError('Invalid value for `customer_id`, must not be `None`')
self._customer_id = customer_id
|
@customer_id.setter
def customer_id(self, customer_id):
'Sets the customer_id of this BasicReport.\n\n the `id` of the [customer](#customers) for which the report has been requested # noqa: E501\n\n :param customer_id: The customer_id of this BasicReport. # noqa: E501\n :type: str\n '
if (customer_id is None):
raise ValueError('Invalid value for `customer_id`, must not be `None`')
self._customer_id = customer_id<|docstring|>Sets the customer_id of this BasicReport.
the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
:param customer_id: The customer_id of this BasicReport. # noqa: E501
:type: str<|endoftext|>
|
b463893a8fe302175bd2206ec69d1e281dac741abe8ed2d2b8993df74db2a7f2
|
@property
def customer_identifier(self):
'Gets the customer_identifier of this BasicReport. # noqa: E501\n\n unique [customer](#customers) identifier # noqa: E501\n\n :return: The customer_identifier of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_identifier
|
Gets the customer_identifier of this BasicReport. # noqa: E501
unique [customer](#customers) identifier # noqa: E501
:return: The customer_identifier of this BasicReport. # noqa: E501
:rtype: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
customer_identifier
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def customer_identifier(self):
'Gets the customer_identifier of this BasicReport. # noqa: E501\n\n unique [customer](#customers) identifier # noqa: E501\n\n :return: The customer_identifier of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_identifier
|
@property
def customer_identifier(self):
'Gets the customer_identifier of this BasicReport. # noqa: E501\n\n unique [customer](#customers) identifier # noqa: E501\n\n :return: The customer_identifier of this BasicReport. # noqa: E501\n :rtype: str\n '
return self._customer_identifier<|docstring|>Gets the customer_identifier of this BasicReport. # noqa: E501
unique [customer](#customers) identifier # noqa: E501
:return: The customer_identifier of this BasicReport. # noqa: E501
:rtype: str<|endoftext|>
|
094b94a4291e5c377e045fc4dad76edaf93b9418452f5188005bc55c01b43046
|
@customer_identifier.setter
def customer_identifier(self, customer_identifier):
    """Set the customer_identifier of this BasicReport.

    unique [customer](#customers) identifier  # noqa: E501

    :param customer_identifier: The customer_identifier of this BasicReport.  # noqa: E501
    :type: str
    """
    # Required field per the API schema: never allow None.
    if customer_identifier is None:
        raise ValueError('Invalid value for `customer_identifier`, must not be `None`')
    self._customer_identifier = customer_identifier
|
Sets the customer_identifier of this BasicReport.
unique [customer](#customers) identifier # noqa: E501
:param customer_identifier: The customer_identifier of this BasicReport. # noqa: E501
:type: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
customer_identifier
|
ltowarek/budget-supervisor
| 1
|
python
|
@customer_identifier.setter
def customer_identifier(self, customer_identifier):
'Sets the customer_identifier of this BasicReport.\n\n unique [customer](#customers) identifier # noqa: E501\n\n :param customer_identifier: The customer_identifier of this BasicReport. # noqa: E501\n :type: str\n '
if (customer_identifier is None):
raise ValueError('Invalid value for `customer_identifier`, must not be `None`')
self._customer_identifier = customer_identifier
|
@customer_identifier.setter
def customer_identifier(self, customer_identifier):
'Sets the customer_identifier of this BasicReport.\n\n unique [customer](#customers) identifier # noqa: E501\n\n :param customer_identifier: The customer_identifier of this BasicReport. # noqa: E501\n :type: str\n '
if (customer_identifier is None):
raise ValueError('Invalid value for `customer_identifier`, must not be `None`')
self._customer_identifier = customer_identifier<|docstring|>Sets the customer_identifier of this BasicReport.
unique [customer](#customers) identifier # noqa: E501
:param customer_identifier: The customer_identifier of this BasicReport. # noqa: E501
:type: str<|endoftext|>
|
0556e303854b9365d7a91bb212a09beb293ab890e25916b2d13acb745f74116a
|
@property
def connection_ids(self):
    """Get the connection_ids of this BasicReport.  # noqa: E501

    `ids` of [connections](#connections) included in the report  # noqa: E501

    :return: The connection_ids of this BasicReport.  # noqa: E501
    :rtype: list[str]
    """
    # Plain read-through accessor over the private backing attribute.
    return self._connection_ids
|
Gets the connection_ids of this BasicReport. # noqa: E501
`ids` of [connections](#connections) included in the report # noqa: E501
:return: The connection_ids of this BasicReport. # noqa: E501
:rtype: list[str]
|
third_party/saltedge/swagger_client/models/basic_report.py
|
connection_ids
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def connection_ids(self):
'Gets the connection_ids of this BasicReport. # noqa: E501\n\n `ids` of [connections](#connections) included in the report # noqa: E501\n\n :return: The connection_ids of this BasicReport. # noqa: E501\n :rtype: list[str]\n '
return self._connection_ids
|
@property
def connection_ids(self):
'Gets the connection_ids of this BasicReport. # noqa: E501\n\n `ids` of [connections](#connections) included in the report # noqa: E501\n\n :return: The connection_ids of this BasicReport. # noqa: E501\n :rtype: list[str]\n '
return self._connection_ids<|docstring|>Gets the connection_ids of this BasicReport. # noqa: E501
`ids` of [connections](#connections) included in the report # noqa: E501
:return: The connection_ids of this BasicReport. # noqa: E501
:rtype: list[str]<|endoftext|>
|
5785efdefbc306a1057f03b548aa1ad00ba5cf9b45feffc7988ca1c37706d814
|
@connection_ids.setter
def connection_ids(self, connection_ids):
    """Set the connection_ids of this BasicReport.

    `ids` of [connections](#connections) included in the report  # noqa: E501

    :param connection_ids: The connection_ids of this BasicReport.  # noqa: E501
    :type: list[str]
    """
    # Required field per the API schema: never allow None.
    if connection_ids is None:
        raise ValueError('Invalid value for `connection_ids`, must not be `None`')
    self._connection_ids = connection_ids
|
Sets the connection_ids of this BasicReport.
`ids` of [connections](#connections) included in the report # noqa: E501
:param connection_ids: The connection_ids of this BasicReport. # noqa: E501
:type: list[str]
|
third_party/saltedge/swagger_client/models/basic_report.py
|
connection_ids
|
ltowarek/budget-supervisor
| 1
|
python
|
@connection_ids.setter
def connection_ids(self, connection_ids):
'Sets the connection_ids of this BasicReport.\n\n `ids` of [connections](#connections) included in the report # noqa: E501\n\n :param connection_ids: The connection_ids of this BasicReport. # noqa: E501\n :type: list[str]\n '
if (connection_ids is None):
raise ValueError('Invalid value for `connection_ids`, must not be `None`')
self._connection_ids = connection_ids
|
@connection_ids.setter
def connection_ids(self, connection_ids):
'Sets the connection_ids of this BasicReport.\n\n `ids` of [connections](#connections) included in the report # noqa: E501\n\n :param connection_ids: The connection_ids of this BasicReport. # noqa: E501\n :type: list[str]\n '
if (connection_ids is None):
raise ValueError('Invalid value for `connection_ids`, must not be `None`')
self._connection_ids = connection_ids<|docstring|>Sets the connection_ids of this BasicReport.
`ids` of [connections](#connections) included in the report # noqa: E501
:param connection_ids: The connection_ids of this BasicReport. # noqa: E501
:type: list[str]<|endoftext|>
|
553fd66168530b5ed72e5a2c26c7a5b7253406c22ba2e2b9c796e269b11db2a5
|
@property
def status(self):
    """Get the status of this BasicReport.  # noqa: E501

    current report's status.  # noqa: E501

    :return: The status of this BasicReport.  # noqa: E501
    :rtype: str
    """
    # Plain read-through accessor over the private backing attribute.
    return self._status
|
Gets the status of this BasicReport. # noqa: E501
current report's status. # noqa: E501
:return: The status of this BasicReport. # noqa: E501
:rtype: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
status
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def status(self):
"Gets the status of this BasicReport. # noqa: E501\n\n current report's status. # noqa: E501\n\n :return: The status of this BasicReport. # noqa: E501\n :rtype: str\n "
return self._status
|
@property
def status(self):
"Gets the status of this BasicReport. # noqa: E501\n\n current report's status. # noqa: E501\n\n :return: The status of this BasicReport. # noqa: E501\n :rtype: str\n "
return self._status<|docstring|>Gets the status of this BasicReport. # noqa: E501
current report's status. # noqa: E501
:return: The status of this BasicReport. # noqa: E501
:rtype: str<|endoftext|>
|
86a12f45f1213b90346ed3b21706e97dc35b841e51e99e482a501d8f7a6fa9ed
|
@status.setter
def status(self, status):
    """Set the status of this BasicReport.

    current report's status.  # noqa: E501

    :param status: The status of this BasicReport.  # noqa: E501
    :type: str
    """
    # Required field per the API schema: never allow None.
    if status is None:
        raise ValueError('Invalid value for `status`, must not be `None`')
    # The API exposes a closed enum of report states; anything else is a caller bug.
    allowed_values = ['initialized', 'success', 'failed', 'calculating']
    if status not in allowed_values:
        raise ValueError('Invalid value for `status` ({0}), must be one of {1}'.format(status, allowed_values))
    self._status = status
|
Sets the status of this BasicReport.
current report's status. # noqa: E501
:param status: The status of this BasicReport. # noqa: E501
:type: str
|
third_party/saltedge/swagger_client/models/basic_report.py
|
status
|
ltowarek/budget-supervisor
| 1
|
python
|
@status.setter
def status(self, status):
"Sets the status of this BasicReport.\n\n current report's status. # noqa: E501\n\n :param status: The status of this BasicReport. # noqa: E501\n :type: str\n "
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
allowed_values = ['initialized', 'success', 'failed', 'calculating']
if (status not in allowed_values):
raise ValueError('Invalid value for `status` ({0}), must be one of {1}'.format(status, allowed_values))
self._status = status
|
@status.setter
def status(self, status):
"Sets the status of this BasicReport.\n\n current report's status. # noqa: E501\n\n :param status: The status of this BasicReport. # noqa: E501\n :type: str\n "
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
allowed_values = ['initialized', 'success', 'failed', 'calculating']
if (status not in allowed_values):
raise ValueError('Invalid value for `status` ({0}), must be one of {1}'.format(status, allowed_values))
self._status = status<|docstring|>Sets the status of this BasicReport.
current report's status. # noqa: E501
:param status: The status of this BasicReport. # noqa: E501
:type: str<|endoftext|>
|
942d689784d592ed3037924e70dd1fd96ede1e56898b5b8496e63759b9d31a92
|
@property
def created_at(self):
    """Get the created_at of this BasicReport.  # noqa: E501

    the date when the report was created  # noqa: E501

    :return: The created_at of this BasicReport.  # noqa: E501
    :rtype: datetime
    """
    # Plain read-through accessor over the private backing attribute.
    return self._created_at
|
Gets the created_at of this BasicReport. # noqa: E501
the date when the report was created # noqa: E501
:return: The created_at of this BasicReport. # noqa: E501
:rtype: datetime
|
third_party/saltedge/swagger_client/models/basic_report.py
|
created_at
|
ltowarek/budget-supervisor
| 1
|
python
|
@property
def created_at(self):
'Gets the created_at of this BasicReport. # noqa: E501\n\n the date when the report was created # noqa: E501\n\n :return: The created_at of this BasicReport. # noqa: E501\n :rtype: datetime\n '
return self._created_at
|
@property
def created_at(self):
'Gets the created_at of this BasicReport. # noqa: E501\n\n the date when the report was created # noqa: E501\n\n :return: The created_at of this BasicReport. # noqa: E501\n :rtype: datetime\n '
return self._created_at<|docstring|>Gets the created_at of this BasicReport. # noqa: E501
the date when the report was created # noqa: E501
:return: The created_at of this BasicReport. # noqa: E501
:rtype: datetime<|endoftext|>
|
ba8581373ef88bf0f6c6e958044bedf805ed8f7a4aadefc2f7af1d5643b59b3e
|
@created_at.setter
def created_at(self, created_at):
    """Set the created_at of this BasicReport.

    the date when the report was created  # noqa: E501

    :param created_at: The created_at of this BasicReport.  # noqa: E501
    :type: datetime
    """
    # Required field per the API schema: never allow None.
    if created_at is None:
        raise ValueError('Invalid value for `created_at`, must not be `None`')
    self._created_at = created_at
|
Sets the created_at of this BasicReport.
the date when the report was created # noqa: E501
:param created_at: The created_at of this BasicReport. # noqa: E501
:type: datetime
|
third_party/saltedge/swagger_client/models/basic_report.py
|
created_at
|
ltowarek/budget-supervisor
| 1
|
python
|
@created_at.setter
def created_at(self, created_at):
'Sets the created_at of this BasicReport.\n\n the date when the report was created # noqa: E501\n\n :param created_at: The created_at of this BasicReport. # noqa: E501\n :type: datetime\n '
if (created_at is None):
raise ValueError('Invalid value for `created_at`, must not be `None`')
self._created_at = created_at
|
@created_at.setter
def created_at(self, created_at):
'Sets the created_at of this BasicReport.\n\n the date when the report was created # noqa: E501\n\n :param created_at: The created_at of this BasicReport. # noqa: E501\n :type: datetime\n '
if (created_at is None):
raise ValueError('Invalid value for `created_at`, must not be `None`')
self._created_at = created_at<|docstring|>Sets the created_at of this BasicReport.
the date when the report was created # noqa: E501
:param created_at: The created_at of this BasicReport. # noqa: E501
:type: datetime<|endoftext|>
|
e4c2b03dc387e002c3a839fd8d197b81057e7f389a063abe821368e76e146ebd
|
def to_dict(self):
    """Returns the model properties as a dict"""
    result = {}
    # swagger_types enumerates every declared model attribute.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            # Recurse into list elements that are themselves models.
            result[attr] = [
                item.to_dict() if hasattr(item, 'to_dict') else item
                for item in value
            ]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Recurse into dict values that are themselves models.
            result[attr] = {
                key: val.to_dict() if hasattr(val, 'to_dict') else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    # If the generated model subclasses dict, copy its own items as well.
    if issubclass(BasicReport, dict):
        for key, value in self.items():
            result[key] = value
    return result
|
Returns the model properties as a dict
|
third_party/saltedge/swagger_client/models/basic_report.py
|
to_dict
|
ltowarek/budget-supervisor
| 1
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(BasicReport, dict):
for (key, value) in self.items():
result[key] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(BasicReport, dict):
for (key, value) in self.items():
result[key] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
    """Returns the string representation of the model"""
    model_as_dict = self.to_dict()
    return pprint.pformat(model_as_dict)
|
Returns the string representation of the model
|
third_party/saltedge/swagger_client/models/basic_report.py
|
to_str
|
ltowarek/budget-supervisor
| 1
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
    'Return the string representation of the model (used by `print` and `pprint`).'
    return self.to_str()
|
For `print` and `pprint`
|
third_party/saltedge/swagger_client/models/basic_report.py
|
__repr__
|
ltowarek/budget-supervisor
| 1
|
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
451e6e90103f57af9658e681973c907502767a75b976899dfb46db60fbfcb70f
|
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Only instances of the same model can compare equal.
    if not isinstance(other, BasicReport):
        return False
    # Attribute-wise comparison via the instance dicts.
    return self.__dict__ == other.__dict__
|
Returns true if both objects are equal
|
third_party/saltedge/swagger_client/models/basic_report.py
|
__eq__
|
ltowarek/budget-supervisor
| 1
|
python
|
def __eq__(self, other):
if (not isinstance(other, BasicReport)):
return False
return (self.__dict__ == other.__dict__)
|
def __eq__(self, other):
if (not isinstance(other, BasicReport)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
|
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
|
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Defined as the negation of __eq__.
    are_equal = (self == other)
    return not are_equal
|
Returns true if both objects are not equal
|
third_party/saltedge/swagger_client/models/basic_report.py
|
__ne__
|
ltowarek/budget-supervisor
| 1
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
4e08c505fb801b8889e44f177fffbf8f38b614186d0e4f2c906635d0d0e2c517
|
def resolve(self, settings, msg):
    """
    Resolves offset specifications to a numeric offset. Returns a copy
    of the action object.
    """
    resolved = copy.copy(self)
    msg_len = msg.length(settings)
    if resolved.offset == 'r':
        # "r": a random offset somewhere within the message.
        resolved.offset = random.randrange(msg_len)
    elif resolved.offset == 'a':
        # "a": just past the end of the message (append).
        resolved.offset = msg_len + 1
    return resolved
|
Resolves offset specifications to a numeric offset. Returns a copy
of the action object.
|
pathod/language/actions.py
|
resolve
|
jinlin0/mitmproxy
| 74
|
python
|
def resolve(self, settings, msg):
'\n Resolves offset specifications to a numeric offset. Returns a copy\n of the action object.\n '
c = copy.copy(self)
l = msg.length(settings)
if (c.offset == 'r'):
c.offset = random.randrange(l)
elif (c.offset == 'a'):
c.offset = (l + 1)
return c
|
def resolve(self, settings, msg):
'\n Resolves offset specifications to a numeric offset. Returns a copy\n of the action object.\n '
c = copy.copy(self)
l = msg.length(settings)
if (c.offset == 'r'):
c.offset = random.randrange(l)
elif (c.offset == 'a'):
c.offset = (l + 1)
return c<|docstring|>Resolves offset specifications to a numeric offset. Returns a copy
of the action object.<|endoftext|>
|
baebf3ecb5cec7928991f0857f7570a25e7d3e8eee7522d6f6842d126a546a5a
|
def regularize_cost(regex, func, name='regularize_cost'):
    """
    Apply a regularizer on trainable variables matching the regex, and print
    the matched variables (only print once in multi-tower training).
    In replicated mode, it will only regularize variables within the current tower.

    Args:
        regex (str): a regex to match variable names, e.g. "conv.*/W"
        func: the regularization function, which takes a tensor and returns a scalar tensor.
            E.g., ``tf.contrib.layers.l2_regularizer``.

    Returns:
        tf.Tensor: the total regularization cost.

    Example:
        .. code-block:: python

            cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))
    """
    assert len(regex)
    ctx = get_current_tower_context()
    # Regularization is a training-only cost; return a zero placeholder otherwise.
    if (not ctx.is_training):
        return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
    # In replicated mode each tower holds its own variable copies; restrict
    # the search to this tower's collection so each variable is counted once.
    if ctx.has_own_variables:
        params = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
    else:
        params = tf.trainable_variables()
    to_regularize = []
    with tf.name_scope((name + '_internals')):
        costs = []
        for p in params:
            para_name = p.op.name
            if re.search(regex, para_name):
                costs.append(func(p))
                to_regularize.append(p.name)
        if (not costs):
            # Nothing matched the regex: zero-cost placeholder.
            return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
    # Strip the tower's variable-scope prefix from the matched names before
    # logging — presumably so the printed list is tower-agnostic (TODO confirm).
    if len(ctx.vs_name):
        prefix = (ctx.vs_name + '/')
        prefixlen = len(prefix)
        def f(name):
            if name.startswith(prefix):
                return name[prefixlen:]
            return name
        to_regularize = list(map(f, to_regularize))
    to_print = ', '.join(to_regularize)
    _log_regularizer(to_print)
    return tf.add_n(costs, name=name)
|
Apply a regularizer on trainable variables matching the regex, and print
the matched variables (only print once in multi-tower training).
In replicated mode, it will only regularize variables within the current tower.
Args:
regex (str): a regex to match variable names, e.g. "conv.*/W"
func: the regularization function, which takes a tensor and returns a scalar tensor.
E.g., ``tf.contrib.layers.l2_regularizer``.
Returns:
tf.Tensor: the total regularization cost.
Example:
.. code-block:: python
cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))
|
tensorpack/models/regularize.py
|
regularize_cost
|
dongzhuoyao/tensorpack
| 3
|
python
|
def regularize_cost(regex, func, name='regularize_cost'):
'\n Apply a regularizer on trainable variables matching the regex, and print\n the matched variables (only print once in multi-tower training).\n In replicated mode, it will only regularize variables within the current tower.\n\n Args:\n regex (str): a regex to match variable names, e.g. "conv.*/W"\n func: the regularization function, which takes a tensor and returns a scalar tensor.\n E.g., ``tf.contrib.layers.l2_regularizer``.\n\n Returns:\n tf.Tensor: the total regularization cost.\n\n Example:\n .. code-block:: python\n\n cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))\n '
assert len(regex)
ctx = get_current_tower_context()
if (not ctx.is_training):
return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
if ctx.has_own_variables:
params = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
else:
params = tf.trainable_variables()
to_regularize = []
with tf.name_scope((name + '_internals')):
costs = []
for p in params:
para_name = p.op.name
if re.search(regex, para_name):
costs.append(func(p))
to_regularize.append(p.name)
if (not costs):
return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
if len(ctx.vs_name):
prefix = (ctx.vs_name + '/')
prefixlen = len(prefix)
def f(name):
if name.startswith(prefix):
return name[prefixlen:]
return name
to_regularize = list(map(f, to_regularize))
to_print = ', '.join(to_regularize)
_log_regularizer(to_print)
return tf.add_n(costs, name=name)
|
def regularize_cost(regex, func, name='regularize_cost'):
'\n Apply a regularizer on trainable variables matching the regex, and print\n the matched variables (only print once in multi-tower training).\n In replicated mode, it will only regularize variables within the current tower.\n\n Args:\n regex (str): a regex to match variable names, e.g. "conv.*/W"\n func: the regularization function, which takes a tensor and returns a scalar tensor.\n E.g., ``tf.contrib.layers.l2_regularizer``.\n\n Returns:\n tf.Tensor: the total regularization cost.\n\n Example:\n .. code-block:: python\n\n cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))\n '
assert len(regex)
ctx = get_current_tower_context()
if (not ctx.is_training):
return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
if ctx.has_own_variables:
params = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
else:
params = tf.trainable_variables()
to_regularize = []
with tf.name_scope((name + '_internals')):
costs = []
for p in params:
para_name = p.op.name
if re.search(regex, para_name):
costs.append(func(p))
to_regularize.append(p.name)
if (not costs):
return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
if len(ctx.vs_name):
prefix = (ctx.vs_name + '/')
prefixlen = len(prefix)
def f(name):
if name.startswith(prefix):
return name[prefixlen:]
return name
to_regularize = list(map(f, to_regularize))
to_print = ', '.join(to_regularize)
_log_regularizer(to_print)
return tf.add_n(costs, name=name)<|docstring|>Apply a regularizer on trainable variables matching the regex, and print
the matched variables (only print once in multi-tower training).
In replicated mode, it will only regularize variables within the current tower.
Args:
regex (str): a regex to match variable names, e.g. "conv.*/W"
func: the regularization function, which takes a tensor and returns a scalar tensor.
E.g., ``tf.contrib.layers.l2_regularizer``.
Returns:
tf.Tensor: the total regularization cost.
Example:
.. code-block:: python
cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))<|endoftext|>
|
a4b62b9c90f26c910bc9a73d586b547ab0dfbcce33def8ee091ec60729ad6359
|
def regularize_cost_from_collection(name='regularize_cost'):
    """
    Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
    In replicated mode, will only regularize variables within the current tower.

    Returns:
        a scalar tensor, the regularization loss, or None
    """
    ctx = get_current_tower_context()
    # Regularization losses are only meaningful while training.
    if not ctx.is_training:
        return None
    # In replicated mode each tower keeps its own losses; use this tower's only.
    if ctx.has_own_variables:
        reg_losses = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
    else:
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if not reg_losses:
        return None
    logger.info('Add REGULARIZATION_LOSSES of {} tensors on the total cost.'.format(len(reg_losses)))
    return tf.add_n(reg_losses, name=name)
|
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
In replicated mode, will only regularize variables within the current tower.
Returns:
a scalar tensor, the regularization loss, or None
|
tensorpack/models/regularize.py
|
regularize_cost_from_collection
|
dongzhuoyao/tensorpack
| 3
|
python
|
def regularize_cost_from_collection(name='regularize_cost'):
'\n Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.\n In replicated mode, will only regularize variables within the current tower.\n\n Returns:\n a scalar tensor, the regularization loss, or None\n '
ctx = get_current_tower_context()
if (not ctx.is_training):
return None
if ctx.has_own_variables:
losses = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
else:
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if (len(losses) > 0):
logger.info('Add REGULARIZATION_LOSSES of {} tensors on the total cost.'.format(len(losses)))
reg_loss = tf.add_n(losses, name=name)
return reg_loss
else:
return None
|
def regularize_cost_from_collection(name='regularize_cost'):
'\n Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.\n In replicated mode, will only regularize variables within the current tower.\n\n Returns:\n a scalar tensor, the regularization loss, or None\n '
ctx = get_current_tower_context()
if (not ctx.is_training):
return None
if ctx.has_own_variables:
losses = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
else:
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if (len(losses) > 0):
logger.info('Add REGULARIZATION_LOSSES of {} tensors on the total cost.'.format(len(losses)))
reg_loss = tf.add_n(losses, name=name)
return reg_loss
else:
return None<|docstring|>Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
In replicated mode, will only regularize variables within the current tower.
Returns:
a scalar tensor, the regularization loss, or None<|endoftext|>
|
dd854b2179e59d84e9891f0160b58dfefc775d8a6b3723694b75df1539a3ae90
|
@layer_register(use_scope=None)
def Dropout(x, keep_prob=0.5, is_training=None, noise_shape=None):
    """
    Dropout layer as in the paper `Dropout: a Simple Way to Prevent
    Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.

    Args:
        keep_prob (float): the probability that each element is kept. It is only used
            when is_training=True.
        is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`
            to figure out.
        noise_shape: same as `tf.nn.dropout`.
    """
    # When the caller does not say, ask the tower context for the training mode.
    training = is_training if is_training is not None else get_current_tower_context().is_training
    # tf.layers.dropout expects a *drop* rate, hence 1 - keep_prob.
    return tf.layers.dropout(x, rate=1 - keep_prob, noise_shape=noise_shape, training=training)
|
Dropout layer as in the paper `Dropout: a Simple Way to Prevent
Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.
Args:
keep_prob (float): the probability that each element is kept. It is only used
when is_training=True.
is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`
to figure out.
noise_shape: same as `tf.nn.dropout`.
|
tensorpack/models/regularize.py
|
Dropout
|
dongzhuoyao/tensorpack
| 3
|
python
|
@layer_register(use_scope=None)
def Dropout(x, keep_prob=0.5, is_training=None, noise_shape=None):
'\n Dropout layer as in the paper `Dropout: a Simple Way to Prevent\n Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.\n\n Args:\n keep_prob (float): the probability that each element is kept. It is only used\n when is_training=True.\n is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`\n to figure out.\n noise_shape: same as `tf.nn.dropout`.\n '
if (is_training is None):
is_training = get_current_tower_context().is_training
return tf.layers.dropout(x, rate=(1 - keep_prob), noise_shape=noise_shape, training=is_training)
|
@layer_register(use_scope=None)
def Dropout(x, keep_prob=0.5, is_training=None, noise_shape=None):
'\n Dropout layer as in the paper `Dropout: a Simple Way to Prevent\n Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.\n\n Args:\n keep_prob (float): the probability that each element is kept. It is only used\n when is_training=True.\n is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`\n to figure out.\n noise_shape: same as `tf.nn.dropout`.\n '
if (is_training is None):
is_training = get_current_tower_context().is_training
return tf.layers.dropout(x, rate=(1 - keep_prob), noise_shape=noise_shape, training=is_training)<|docstring|>Dropout layer as in the paper `Dropout: a Simple Way to Prevent
Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.
Args:
keep_prob (float): the probability that each element is kept. It is only used
when is_training=True.
is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`
to figure out.
noise_shape: same as `tf.nn.dropout`.<|endoftext|>
|
ab7fc728838dcc665f9df81599465a4b5e2ad79aa0fa71c7863d2fe408a26d18
|
def get_next_page_number(self) -> typing.Optional[int]:
    """Get the next page number, or None when on the last page."""
    if self.page.has_next():
        return self.page.next_page_number()
    return None
|
Get the next page number.
|
pydis_site/apps/api/viewsets/bot/user.py
|
get_next_page_number
|
Anubhav1603/site
| 0
|
python
|
def get_next_page_number(self) -> typing.Optional[int]:
if (not self.page.has_next()):
return None
page_number = self.page.next_page_number()
return page_number
|
def get_next_page_number(self) -> typing.Optional[int]:
if (not self.page.has_next()):
return None
page_number = self.page.next_page_number()
return page_number<|docstring|>Get the next page number.<|endoftext|>
|
6752c3d279046a7421450bcd27f9d18ec3b218be419eb78c70b7c829c9de6ee1
|
def get_previous_page_number(self) -> typing.Optional[int]:
    """Get the previous page number, or None when on the first page."""
    if self.page.has_previous():
        return self.page.previous_page_number()
    return None
|
Get the previous page number.
|
pydis_site/apps/api/viewsets/bot/user.py
|
get_previous_page_number
|
Anubhav1603/site
| 0
|
python
|
def get_previous_page_number(self) -> typing.Optional[int]:
if (not self.page.has_previous()):
return None
page_number = self.page.previous_page_number()
return page_number
|
def get_previous_page_number(self) -> typing.Optional[int]:
if (not self.page.has_previous()):
return None
page_number = self.page.previous_page_number()
return page_number<|docstring|>Get the previous page number.<|endoftext|>
|
1c6639542d4f595240e8f112585ad8de8af5edad69a48b02afbd236aaf2ce4ef
|
def get_paginated_response(self, data: list) -> Response:
    """Override method to send modified response."""
    # Use page-number fields instead of DRF's default next/previous URLs.
    payload = OrderedDict([
        ('count', self.page.paginator.count),
        ('next_page_no', self.get_next_page_number()),
        ('previous_page_no', self.get_previous_page_number()),
        ('results', data),
    ])
    return Response(payload)
|
Override method to send modified response.
|
pydis_site/apps/api/viewsets/bot/user.py
|
get_paginated_response
|
Anubhav1603/site
| 0
|
python
|
def get_paginated_response(self, data: list) -> Response:
return Response(OrderedDict([('count', self.page.paginator.count), ('next_page_no', self.get_next_page_number()), ('previous_page_no', self.get_previous_page_number()), ('results', data)]))
|
def get_paginated_response(self, data: list) -> Response:
return Response(OrderedDict([('count', self.page.paginator.count), ('next_page_no', self.get_next_page_number()), ('previous_page_no', self.get_previous_page_number()), ('results', data)]))<|docstring|>Override method to send modified response.<|endoftext|>
|
adc116a6b75af43aadb55232e26671685acfbbefcf6cf0a0f278fd37d1ede14e
|
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
'Set Serializer many attribute to True if request body contains a list.'
if isinstance(kwargs.get('data', {}), list):
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)
|
Set Serializer many attribute to True if request body contains a list.
|
pydis_site/apps/api/viewsets/bot/user.py
|
get_serializer
|
Anubhav1603/site
| 0
|
python
|
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
if isinstance(kwargs.get('data', {}), list):
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)
|
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
if isinstance(kwargs.get('data', {}), list):
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)<|docstring|>Set Serializer many attribute to True if request body contains a list.<|endoftext|>
|
211f50ed86e5002d695194c01293d53143faba85a3a7ea206668afec2a97b8cb
|
@action(detail=False, methods=['PATCH'], name='user-bulk-patch')
def bulk_patch(self, request: Request) -> Response:
'Update multiple User objects in a single request.'
serializer = self.get_serializer(instance=self.get_queryset(), data=request.data, many=True, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
|
Update multiple User objects in a single request.
|
pydis_site/apps/api/viewsets/bot/user.py
|
bulk_patch
|
Anubhav1603/site
| 0
|
python
|
@action(detail=False, methods=['PATCH'], name='user-bulk-patch')
def bulk_patch(self, request: Request) -> Response:
serializer = self.get_serializer(instance=self.get_queryset(), data=request.data, many=True, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
|
@action(detail=False, methods=['PATCH'], name='user-bulk-patch')
def bulk_patch(self, request: Request) -> Response:
serializer = self.get_serializer(instance=self.get_queryset(), data=request.data, many=True, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)<|docstring|>Update multiple User objects in a single request.<|endoftext|>
|
bdc03ddb26b4bcb0aba8051be3bed82b02d6d390f2cd8a5e0198d5a6d189b06c
|
def _set_default_schedule_and_storage_types(sdfg, toplevel_schedule):
" Sets default storage and schedule types throughout SDFG.\n Replaces `ScheduleType.Default` and `StorageType.Default`\n with the corresponding types according to the parent scope's\n schedule. "
for state in sdfg.nodes():
scope_dict = state.scope_dict()
reverse_scope_dict = state.scope_dict(node_to_children=True)
def set_default_in_scope(parent_node):
if (parent_node is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = parent_node.map.schedule
for node in reverse_scope_dict[parent_node]:
if isinstance(node, nodes.MapEntry):
if (node.map.schedule == dtypes.ScheduleType.Default):
node.map._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif isinstance(node, nodes.ConsumeEntry):
if (node.consume.schedule == dtypes.ScheduleType.Default):
node.consume._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif getattr(node, 'schedule', False):
if (node.schedule == dtypes.ScheduleType.Default):
node._schedule = parent_schedule
set_default_in_scope(None)
for node in state.nodes():
if isinstance(node, nodes.AccessNode):
if (node.desc(sdfg).storage == dtypes.StorageType.Default):
if (scope_dict[node] is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = scope_dict[node].map.schedule
node.desc(sdfg).storage = dtypes.SCOPEDEFAULT_STORAGE[parent_schedule]
|
Sets default storage and schedule types throughout SDFG.
Replaces `ScheduleType.Default` and `StorageType.Default`
with the corresponding types according to the parent scope's
schedule.
|
dace/codegen/targets/framecode.py
|
_set_default_schedule_and_storage_types
|
tbennun/dace
| 1
|
python
|
def _set_default_schedule_and_storage_types(sdfg, toplevel_schedule):
" Sets default storage and schedule types throughout SDFG.\n Replaces `ScheduleType.Default` and `StorageType.Default`\n with the corresponding types according to the parent scope's\n schedule. "
for state in sdfg.nodes():
scope_dict = state.scope_dict()
reverse_scope_dict = state.scope_dict(node_to_children=True)
def set_default_in_scope(parent_node):
if (parent_node is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = parent_node.map.schedule
for node in reverse_scope_dict[parent_node]:
if isinstance(node, nodes.MapEntry):
if (node.map.schedule == dtypes.ScheduleType.Default):
node.map._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif isinstance(node, nodes.ConsumeEntry):
if (node.consume.schedule == dtypes.ScheduleType.Default):
node.consume._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif getattr(node, 'schedule', False):
if (node.schedule == dtypes.ScheduleType.Default):
node._schedule = parent_schedule
set_default_in_scope(None)
for node in state.nodes():
if isinstance(node, nodes.AccessNode):
if (node.desc(sdfg).storage == dtypes.StorageType.Default):
if (scope_dict[node] is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = scope_dict[node].map.schedule
node.desc(sdfg).storage = dtypes.SCOPEDEFAULT_STORAGE[parent_schedule]
|
def _set_default_schedule_and_storage_types(sdfg, toplevel_schedule):
" Sets default storage and schedule types throughout SDFG.\n Replaces `ScheduleType.Default` and `StorageType.Default`\n with the corresponding types according to the parent scope's\n schedule. "
for state in sdfg.nodes():
scope_dict = state.scope_dict()
reverse_scope_dict = state.scope_dict(node_to_children=True)
def set_default_in_scope(parent_node):
if (parent_node is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = parent_node.map.schedule
for node in reverse_scope_dict[parent_node]:
if isinstance(node, nodes.MapEntry):
if (node.map.schedule == dtypes.ScheduleType.Default):
node.map._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif isinstance(node, nodes.ConsumeEntry):
if (node.consume.schedule == dtypes.ScheduleType.Default):
node.consume._schedule = dtypes.SCOPEDEFAULT_SCHEDULE[parent_schedule]
set_default_in_scope(node)
elif getattr(node, 'schedule', False):
if (node.schedule == dtypes.ScheduleType.Default):
node._schedule = parent_schedule
set_default_in_scope(None)
for node in state.nodes():
if isinstance(node, nodes.AccessNode):
if (node.desc(sdfg).storage == dtypes.StorageType.Default):
if (scope_dict[node] is None):
parent_schedule = toplevel_schedule
else:
parent_schedule = scope_dict[node].map.schedule
node.desc(sdfg).storage = dtypes.SCOPEDEFAULT_STORAGE[parent_schedule]<|docstring|>Sets default storage and schedule types throughout SDFG.
Replaces `ScheduleType.Default` and `StorageType.Default`
with the corresponding types according to the parent scope's
schedule.<|endoftext|>
|
35ca43d55fca7ffaf9272f86b9d102b39b0ea5a496a12737dd10328885a841a7
|
def generate_fileheader(self, sdfg: SDFG, global_stream: CodeIOStream):
' Generate a header in every output file that includes custom types\n and constants.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n '
datatypes = set()
for (_, arrname, arr) in sdfg.arrays_recursive():
if (arr is not None):
datatypes.add(arr.dtype)
global_stream.write('\n')
for typ in datatypes:
if hasattr(typ, 'emit_definition'):
global_stream.write(typ.emit_definition(), sdfg)
global_stream.write('\n')
self.generate_constants(sdfg, global_stream)
global_stream.write(sdfg.global_code, sdfg)
|
Generate a header in every output file that includes custom types
and constants.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).
|
dace/codegen/targets/framecode.py
|
generate_fileheader
|
tbennun/dace
| 1
|
python
|
def generate_fileheader(self, sdfg: SDFG, global_stream: CodeIOStream):
' Generate a header in every output file that includes custom types\n and constants.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n '
datatypes = set()
for (_, arrname, arr) in sdfg.arrays_recursive():
if (arr is not None):
datatypes.add(arr.dtype)
global_stream.write('\n')
for typ in datatypes:
if hasattr(typ, 'emit_definition'):
global_stream.write(typ.emit_definition(), sdfg)
global_stream.write('\n')
self.generate_constants(sdfg, global_stream)
global_stream.write(sdfg.global_code, sdfg)
|
def generate_fileheader(self, sdfg: SDFG, global_stream: CodeIOStream):
' Generate a header in every output file that includes custom types\n and constants.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n '
datatypes = set()
for (_, arrname, arr) in sdfg.arrays_recursive():
if (arr is not None):
datatypes.add(arr.dtype)
global_stream.write('\n')
for typ in datatypes:
if hasattr(typ, 'emit_definition'):
global_stream.write(typ.emit_definition(), sdfg)
global_stream.write('\n')
self.generate_constants(sdfg, global_stream)
global_stream.write(sdfg.global_code, sdfg)<|docstring|>Generate a header in every output file that includes custom types
and constants.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).<|endoftext|>
|
e867d11ad06c98551a69c6a61d3744927ee870d8e020184b6d2f21521dde7a5b
|
def generate_header(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the header of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
global_stream.write(('/* DaCe AUTO-GENERATED FILE. DO NOT MODIFY */\n' + '#include <dace/dace.h>\n'), sdfg)
self.generate_fileheader(sdfg, callsite_stream)
callsite_stream.write(('void __program_%s_internal(%s)\n{\n' % (fname, params)), sdfg)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_begin(sdfg, callsite_stream, global_stream)
|
Generate the header of the frame-code. Code exists in a separate
function for overriding purposes.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).
@param callsite_stream: Stream to write to (at call site).
|
dace/codegen/targets/framecode.py
|
generate_header
|
tbennun/dace
| 1
|
python
|
def generate_header(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the header of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
global_stream.write(('/* DaCe AUTO-GENERATED FILE. DO NOT MODIFY */\n' + '#include <dace/dace.h>\n'), sdfg)
self.generate_fileheader(sdfg, callsite_stream)
callsite_stream.write(('void __program_%s_internal(%s)\n{\n' % (fname, params)), sdfg)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_begin(sdfg, callsite_stream, global_stream)
|
def generate_header(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the header of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
global_stream.write(('/* DaCe AUTO-GENERATED FILE. DO NOT MODIFY */\n' + '#include <dace/dace.h>\n'), sdfg)
self.generate_fileheader(sdfg, callsite_stream)
callsite_stream.write(('void __program_%s_internal(%s)\n{\n' % (fname, params)), sdfg)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_begin(sdfg, callsite_stream, global_stream)<|docstring|>Generate the header of the frame-code. Code exists in a separate
function for overriding purposes.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).
@param callsite_stream: Stream to write to (at call site).<|endoftext|>
|
d79deb4b958a35a82d14755f39e57c3e155541b0c6d93b2bc9f891455b4a8216
|
def generate_footer(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the footer of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
paramnames = sdfg.signature(False, for_call=True)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_end(sdfg, callsite_stream, global_stream)
callsite_stream.write('}\n', sdfg)
callsite_stream.write(('\nvoid __program_%s_internal(%s);\nDACE_EXPORTED void __program_%s(%s)\n{\n __program_%s_internal(%s);\n}\n' % (fname, params, fname, params, fname, paramnames)), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('DACE_EXPORTED int __dace_init_%s(%s);\n' % (target.target_name, params)), sdfg)
if target.has_finalizer:
callsite_stream.write(('DACE_EXPORTED int __dace_exit_%s(%s);\n' % (target.target_name, params)), sdfg)
callsite_stream.write(('\nDACE_EXPORTED int __dace_init(%s)\n{\n int __result = 0;\n' % params), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('__result |= __dace_init_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write(sdfg.init_code, sdfg)
callsite_stream.write(self._initcode.getvalue(), sdfg)
callsite_stream.write(('\n return __result;\n}\n\nDACE_EXPORTED void __dace_exit(%s)\n{\n' % params), sdfg)
callsite_stream.write(self._exitcode.getvalue(), sdfg)
callsite_stream.write(sdfg.exit_code, sdfg)
for target in self._dispatcher.used_targets:
if target.has_finalizer:
callsite_stream.write(('__dace_exit_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write('}\n', sdfg)
|
Generate the footer of the frame-code. Code exists in a separate
function for overriding purposes.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).
@param callsite_stream: Stream to write to (at call site).
|
dace/codegen/targets/framecode.py
|
generate_footer
|
tbennun/dace
| 1
|
python
|
def generate_footer(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the footer of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
paramnames = sdfg.signature(False, for_call=True)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_end(sdfg, callsite_stream, global_stream)
callsite_stream.write('}\n', sdfg)
callsite_stream.write(('\nvoid __program_%s_internal(%s);\nDACE_EXPORTED void __program_%s(%s)\n{\n __program_%s_internal(%s);\n}\n' % (fname, params, fname, params, fname, paramnames)), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('DACE_EXPORTED int __dace_init_%s(%s);\n' % (target.target_name, params)), sdfg)
if target.has_finalizer:
callsite_stream.write(('DACE_EXPORTED int __dace_exit_%s(%s);\n' % (target.target_name, params)), sdfg)
callsite_stream.write(('\nDACE_EXPORTED int __dace_init(%s)\n{\n int __result = 0;\n' % params), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('__result |= __dace_init_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write(sdfg.init_code, sdfg)
callsite_stream.write(self._initcode.getvalue(), sdfg)
callsite_stream.write(('\n return __result;\n}\n\nDACE_EXPORTED void __dace_exit(%s)\n{\n' % params), sdfg)
callsite_stream.write(self._exitcode.getvalue(), sdfg)
callsite_stream.write(sdfg.exit_code, sdfg)
for target in self._dispatcher.used_targets:
if target.has_finalizer:
callsite_stream.write(('__dace_exit_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write('}\n', sdfg)
|
def generate_footer(self, sdfg: SDFG, global_stream: CodeIOStream, callsite_stream: CodeIOStream):
' Generate the footer of the frame-code. Code exists in a separate\n function for overriding purposes.\n @param sdfg: The input SDFG.\n @param global_stream: Stream to write to (global).\n @param callsite_stream: Stream to write to (at call site).\n '
fname = sdfg.name
params = sdfg.signature()
paramnames = sdfg.signature(False, for_call=True)
for instr in self._dispatcher.instrumentation.values():
if (instr is not None):
instr.on_sdfg_end(sdfg, callsite_stream, global_stream)
callsite_stream.write('}\n', sdfg)
callsite_stream.write(('\nvoid __program_%s_internal(%s);\nDACE_EXPORTED void __program_%s(%s)\n{\n __program_%s_internal(%s);\n}\n' % (fname, params, fname, params, fname, paramnames)), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('DACE_EXPORTED int __dace_init_%s(%s);\n' % (target.target_name, params)), sdfg)
if target.has_finalizer:
callsite_stream.write(('DACE_EXPORTED int __dace_exit_%s(%s);\n' % (target.target_name, params)), sdfg)
callsite_stream.write(('\nDACE_EXPORTED int __dace_init(%s)\n{\n int __result = 0;\n' % params), sdfg)
for target in self._dispatcher.used_targets:
if target.has_initializer:
callsite_stream.write(('__result |= __dace_init_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write(sdfg.init_code, sdfg)
callsite_stream.write(self._initcode.getvalue(), sdfg)
callsite_stream.write(('\n return __result;\n}\n\nDACE_EXPORTED void __dace_exit(%s)\n{\n' % params), sdfg)
callsite_stream.write(self._exitcode.getvalue(), sdfg)
callsite_stream.write(sdfg.exit_code, sdfg)
for target in self._dispatcher.used_targets:
if target.has_finalizer:
callsite_stream.write(('__dace_exit_%s(%s);' % (target.target_name, paramnames)), sdfg)
callsite_stream.write('}\n', sdfg)<|docstring|>Generate the footer of the frame-code. Code exists in a separate
function for overriding purposes.
@param sdfg: The input SDFG.
@param global_stream: Stream to write to (global).
@param callsite_stream: Stream to write to (at call site).<|endoftext|>
|
e74276d020dd0754d98078df35df52841c02ec732655ecf9bd356109858514da
|
@staticmethod
def all_nodes_between(graph, begin, end):
'Finds all nodes between begin and end. Returns None if there is any\n path starting at begin that does not reach end.'
to_visit = [begin]
seen = set()
while (len(to_visit) > 0):
n = to_visit.pop()
if (n == end):
continue
if (n in seen):
continue
seen.add(n)
node_out_edges = graph.out_edges(n)
if (len(node_out_edges) == 0):
return None
for e in node_out_edges:
next_node = e.dst
if ((next_node != end) and (next_node not in seen)):
to_visit.append(next_node)
return seen
|
Finds all nodes between begin and end. Returns None if there is any
path starting at begin that does not reach end.
|
dace/codegen/targets/framecode.py
|
all_nodes_between
|
tbennun/dace
| 1
|
python
|
@staticmethod
def all_nodes_between(graph, begin, end):
'Finds all nodes between begin and end. Returns None if there is any\n path starting at begin that does not reach end.'
to_visit = [begin]
seen = set()
while (len(to_visit) > 0):
n = to_visit.pop()
if (n == end):
continue
if (n in seen):
continue
seen.add(n)
node_out_edges = graph.out_edges(n)
if (len(node_out_edges) == 0):
return None
for e in node_out_edges:
next_node = e.dst
if ((next_node != end) and (next_node not in seen)):
to_visit.append(next_node)
return seen
|
@staticmethod
def all_nodes_between(graph, begin, end):
'Finds all nodes between begin and end. Returns None if there is any\n path starting at begin that does not reach end.'
to_visit = [begin]
seen = set()
while (len(to_visit) > 0):
n = to_visit.pop()
if (n == end):
continue
if (n in seen):
continue
seen.add(n)
node_out_edges = graph.out_edges(n)
if (len(node_out_edges) == 0):
return None
for e in node_out_edges:
next_node = e.dst
if ((next_node != end) and (next_node not in seen)):
to_visit.append(next_node)
return seen<|docstring|>Finds all nodes between begin and end. Returns None if there is any
path starting at begin that does not reach end.<|endoftext|>
|
660502a749b662578abfadb52a603c4cc62ef6cdd86a67560469cd42b2da6f67
|
def generate_code(self, sdfg: SDFG, schedule: dtypes.ScheduleType, sdfg_id: str='') -> (str, str, Set[TargetCodeGenerator]):
" Generate frame code for a given SDFG, calling registered targets'\n code generation callbacks for them to generate their own code.\n @param sdfg: The SDFG to generate code for.\n @param schedule: The schedule the SDFG is currently located, or\n None if the SDFG is top-level.\n @param sdfg_id: An optional string id given to the SDFG label\n @return: A tuple of the generated global frame code, local frame\n code, and a set of targets that have been used in the\n generation of this SDFG.\n "
sdfg_label = (sdfg.name + sdfg_id)
global_stream = CodeIOStream()
callsite_stream = CodeIOStream()
_set_default_schedule_and_storage_types(sdfg, schedule)
if (sdfg.parent is None):
self.generate_header(sdfg, global_stream, callsite_stream)
if (sdfg.parent is not None):
symbols_available = sdfg.parent_sdfg.symbols_defined_at(sdfg)
else:
symbols_available = sdfg.constants
shared_transients = sdfg.shared_transients()
allocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in allocated)):
self._dispatcher.dispatch_allocate(sdfg, state, None, node, global_stream, callsite_stream)
self._dispatcher.dispatch_initialize(sdfg, state, None, node, global_stream, callsite_stream)
allocated.add(node.data)
(assigned, _) = sdfg.interstate_symbols()
for (isvarName, isvarType) in assigned.items():
if (isvarName in allocated):
continue
callsite_stream.write(('%s;\n' % isvarType.signature(with_types=True, name=isvarName)), sdfg)
for argnode in dtypes.deduplicate((sdfg.input_arrays() + sdfg.output_arrays())):
if argnode.desc(sdfg).transient:
continue
self._dispatcher.dispatch_initialize(sdfg, sdfg, None, argnode, global_stream, callsite_stream)
callsite_stream.write('\n', sdfg)
states_topological = list(sdfg.topological_sort(sdfg.start_state))
control_flow = {e: [] for e in sdfg.edges()}
if dace.config.Config.get_bool('optimizer', 'detect_control_flow'):
all_cycles = list(sdfg.find_cycles())
all_cycles = [sorted(c, key=(lambda x: states_topological.index(x))) for c in all_cycles]
starting_nodes = [c[0] for c in all_cycles]
starting_nodes = sorted(starting_nodes, key=(lambda x: states_topological.index(x)))
cycles_by_node = [[c for c in all_cycles if (c[0] == n)] for n in starting_nodes]
for cycles in cycles_by_node:
first_node = cycles[0][0]
last_node = cycles[0][(- 1)]
if (not first_node.is_empty()):
continue
if (not all([(c[(- 1)] == last_node) for c in cycles])):
continue
previous_edge = [e for e in sdfg.in_edges(first_node) if (e.src != last_node)]
if (len(previous_edge) != 1):
continue
previous_edge = previous_edge[0]
back_edge = sdfg.edges_between(last_node, first_node)
if (len(back_edge) != 1):
raise RuntimeError('Expected exactly one edge in cycle')
back_edge = back_edge[0]
internal_nodes = (functools.reduce((lambda a, b: (a | b)), [set(c) for c in cycles]) - {first_node})
exit_edge = [e for e in sdfg.out_edges(first_node) if (e.dst not in (internal_nodes | {first_node}))]
if (len(exit_edge) != 1):
continue
exit_edge = exit_edge[0]
entry_edge = [e for e in sdfg.out_edges(first_node) if (e != exit_edge)]
if (len(entry_edge) != 1):
continue
entry_edge = entry_edge[0]
if ((len(control_flow[entry_edge]) != 0) or (len(control_flow[back_edge]) != 0)):
continue
if ((len(control_flow[previous_edge]) == 1) and isinstance(control_flow[previous_edge][0], dace.graph.edges.LoopEntry)):
loop_parent = control_flow[previous_edge][0].scope
elif ((len(control_flow[exit_edge]) == 1) and isinstance(control_flow[exit_edge][0], dace.graph.edges.LoopBack)):
loop_parent = control_flow[exit_edge][0].scope
elif ((len(control_flow[exit_edge]) == 0) or (len(control_flow[previous_edge]) == 0)):
loop_parent = None
else:
continue
if (entry_edge == back_edge):
continue
if any([(len((set(c) - internal_nodes)) > 1) for c in cycles]):
continue
loop_scope = dace.graph.edges.LoopScope(internal_nodes)
if (((len(previous_edge.data.assignments) > 0) or (len(back_edge.data.assignments) > 0)) and ((len(control_flow[previous_edge]) == 0) or ((len(control_flow[previous_edge]) == 1) and (control_flow[previous_edge][0].scope == loop_parent)))):
control_flow[previous_edge].append(dace.graph.edges.LoopAssignment(loop_scope, previous_edge))
control_flow[entry_edge].append(dace.graph.edges.LoopEntry(loop_scope, entry_edge))
control_flow[exit_edge].append(dace.graph.edges.LoopExit(loop_scope, exit_edge))
control_flow[back_edge].append(dace.graph.edges.LoopBack(loop_scope, back_edge))
candidates = [n for n in states_topological if (sdfg.out_degree(n) == 2)]
for candidate in candidates:
dominators = nx.dominance.dominance_frontiers(sdfg.nx, candidate)
(left_entry, right_entry) = sdfg.out_edges(candidate)
if ((len(control_flow[left_entry]) > 0) or (len(control_flow[right_entry]) > 0)):
continue
(left, right) = (left_entry.dst, right_entry.dst)
dominator = (dominators[left] & dominators[right])
if (len(dominator) != 1):
continue
dominator = next(iter(dominator))
exit_edges = sdfg.in_edges(dominator)
if (len(exit_edges) != 2):
continue
(left_exit, right_exit) = exit_edges
if ((len(control_flow[left_exit]) > 0) or (len(control_flow[right_exit]) > 0)):
continue
left_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, left, dominator)
if (left_nodes is None):
continue
right_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, right, dominator)
if (right_nodes is None):
continue
all_nodes = (left_nodes | right_nodes)
if (len((left_nodes & right_nodes)) > 0):
continue
if_then_else = dace.graph.edges.IfThenElse(candidate, dominator)
has_else = False
if (len(dominators[left]) == 1):
then_scope = dace.graph.edges.IfThenScope(if_then_else, left_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, right_nodes)
control_flow[left_entry].append(dace.graph.edges.IfEntry(then_scope, left_entry))
control_flow[left_exit].append(dace.graph.edges.IfExit(then_scope, left_exit))
control_flow[right_exit].append(dace.graph.edges.IfExit(else_scope, right_exit))
if (len(dominators[right]) == 1):
control_flow[right_entry].append(dace.graph.edges.IfEntry(else_scope, right_entry))
has_else = True
else:
then_scope = dace.graph.edges.IfThenScope(if_then_else, right_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, left_nodes)
control_flow[right_entry].append(dace.graph.edges.IfEntry(then_scope, right_entry))
control_flow[right_exit].append(dace.graph.edges.IfExit(then_scope, right_exit))
control_flow[left_exit].append(dace.graph.edges.IfExit(else_scope, left_exit))
states_generated = set()
generated_edges = set()
self.generate_states(sdfg, 'sdfg', control_flow, global_stream, callsite_stream, set(states_topological), states_generated, generated_edges)
if (len(states_generated) != len(sdfg.nodes())):
raise RuntimeError('Not all states were generated in SDFG {}!\n Generated: {}\n Missing: {}'.format(sdfg.label, [s.label for s in states_generated], [s.label for s in (set(sdfg.nodes()) - states_generated)]))
shared_transients = sdfg.shared_transients()
deallocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in deallocated)):
self._dispatcher.dispatch_deallocate(sdfg, state, None, node, global_stream, callsite_stream)
deallocated.add(node.data)
if (sdfg.parent is None):
self.generate_footer(sdfg, global_stream, callsite_stream)
return (global_stream.getvalue(), callsite_stream.getvalue(), self._dispatcher.used_targets)
|
Generate frame code for a given SDFG, calling registered targets'
code generation callbacks for them to generate their own code.
@param sdfg: The SDFG to generate code for.
@param schedule: The schedule the SDFG is currently located, or
None if the SDFG is top-level.
@param sdfg_id: An optional string id given to the SDFG label
@return: A tuple of the generated global frame code, local frame
code, and a set of targets that have been used in the
generation of this SDFG.
|
dace/codegen/targets/framecode.py
|
generate_code
|
tbennun/dace
| 1
|
python
|
def generate_code(self, sdfg: SDFG, schedule: dtypes.ScheduleType, sdfg_id: str=) -> (str, str, Set[TargetCodeGenerator]):
" Generate frame code for a given SDFG, calling registered targets'\n code generation callbacks for them to generate their own code.\n @param sdfg: The SDFG to generate code for.\n @param schedule: The schedule the SDFG is currently located, or\n None if the SDFG is top-level.\n @param sdfg_id: An optional string id given to the SDFG label\n @return: A tuple of the generated global frame code, local frame\n code, and a set of targets that have been used in the\n generation of this SDFG.\n "
sdfg_label = (sdfg.name + sdfg_id)
global_stream = CodeIOStream()
callsite_stream = CodeIOStream()
_set_default_schedule_and_storage_types(sdfg, schedule)
if (sdfg.parent is None):
self.generate_header(sdfg, global_stream, callsite_stream)
if (sdfg.parent is not None):
symbols_available = sdfg.parent_sdfg.symbols_defined_at(sdfg)
else:
symbols_available = sdfg.constants
shared_transients = sdfg.shared_transients()
allocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in allocated)):
self._dispatcher.dispatch_allocate(sdfg, state, None, node, global_stream, callsite_stream)
self._dispatcher.dispatch_initialize(sdfg, state, None, node, global_stream, callsite_stream)
allocated.add(node.data)
(assigned, _) = sdfg.interstate_symbols()
for (isvarName, isvarType) in assigned.items():
if (isvarName in allocated):
continue
callsite_stream.write(('%s;\n' % isvarType.signature(with_types=True, name=isvarName)), sdfg)
for argnode in dtypes.deduplicate((sdfg.input_arrays() + sdfg.output_arrays())):
if argnode.desc(sdfg).transient:
continue
self._dispatcher.dispatch_initialize(sdfg, sdfg, None, argnode, global_stream, callsite_stream)
callsite_stream.write('\n', sdfg)
states_topological = list(sdfg.topological_sort(sdfg.start_state))
control_flow = {e: [] for e in sdfg.edges()}
if dace.config.Config.get_bool('optimizer', 'detect_control_flow'):
all_cycles = list(sdfg.find_cycles())
all_cycles = [sorted(c, key=(lambda x: states_topological.index(x))) for c in all_cycles]
starting_nodes = [c[0] for c in all_cycles]
starting_nodes = sorted(starting_nodes, key=(lambda x: states_topological.index(x)))
cycles_by_node = [[c for c in all_cycles if (c[0] == n)] for n in starting_nodes]
for cycles in cycles_by_node:
first_node = cycles[0][0]
last_node = cycles[0][(- 1)]
if (not first_node.is_empty()):
continue
if (not all([(c[(- 1)] == last_node) for c in cycles])):
continue
previous_edge = [e for e in sdfg.in_edges(first_node) if (e.src != last_node)]
if (len(previous_edge) != 1):
continue
previous_edge = previous_edge[0]
back_edge = sdfg.edges_between(last_node, first_node)
if (len(back_edge) != 1):
raise RuntimeError('Expected exactly one edge in cycle')
back_edge = back_edge[0]
internal_nodes = (functools.reduce((lambda a, b: (a | b)), [set(c) for c in cycles]) - {first_node})
exit_edge = [e for e in sdfg.out_edges(first_node) if (e.dst not in (internal_nodes | {first_node}))]
if (len(exit_edge) != 1):
continue
exit_edge = exit_edge[0]
entry_edge = [e for e in sdfg.out_edges(first_node) if (e != exit_edge)]
if (len(entry_edge) != 1):
continue
entry_edge = entry_edge[0]
if ((len(control_flow[entry_edge]) != 0) or (len(control_flow[back_edge]) != 0)):
continue
if ((len(control_flow[previous_edge]) == 1) and isinstance(control_flow[previous_edge][0], dace.graph.edges.LoopEntry)):
loop_parent = control_flow[previous_edge][0].scope
elif ((len(control_flow[exit_edge]) == 1) and isinstance(control_flow[exit_edge][0], dace.graph.edges.LoopBack)):
loop_parent = control_flow[exit_edge][0].scope
elif ((len(control_flow[exit_edge]) == 0) or (len(control_flow[previous_edge]) == 0)):
loop_parent = None
else:
continue
if (entry_edge == back_edge):
continue
if any([(len((set(c) - internal_nodes)) > 1) for c in cycles]):
continue
loop_scope = dace.graph.edges.LoopScope(internal_nodes)
if (((len(previous_edge.data.assignments) > 0) or (len(back_edge.data.assignments) > 0)) and ((len(control_flow[previous_edge]) == 0) or ((len(control_flow[previous_edge]) == 1) and (control_flow[previous_edge][0].scope == loop_parent)))):
control_flow[previous_edge].append(dace.graph.edges.LoopAssignment(loop_scope, previous_edge))
control_flow[entry_edge].append(dace.graph.edges.LoopEntry(loop_scope, entry_edge))
control_flow[exit_edge].append(dace.graph.edges.LoopExit(loop_scope, exit_edge))
control_flow[back_edge].append(dace.graph.edges.LoopBack(loop_scope, back_edge))
candidates = [n for n in states_topological if (sdfg.out_degree(n) == 2)]
for candidate in candidates:
dominators = nx.dominance.dominance_frontiers(sdfg.nx, candidate)
(left_entry, right_entry) = sdfg.out_edges(candidate)
if ((len(control_flow[left_entry]) > 0) or (len(control_flow[right_entry]) > 0)):
continue
(left, right) = (left_entry.dst, right_entry.dst)
dominator = (dominators[left] & dominators[right])
if (len(dominator) != 1):
continue
dominator = next(iter(dominator))
exit_edges = sdfg.in_edges(dominator)
if (len(exit_edges) != 2):
continue
(left_exit, right_exit) = exit_edges
if ((len(control_flow[left_exit]) > 0) or (len(control_flow[right_exit]) > 0)):
continue
left_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, left, dominator)
if (left_nodes is None):
continue
right_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, right, dominator)
if (right_nodes is None):
continue
all_nodes = (left_nodes | right_nodes)
if (len((left_nodes & right_nodes)) > 0):
continue
if_then_else = dace.graph.edges.IfThenElse(candidate, dominator)
has_else = False
if (len(dominators[left]) == 1):
then_scope = dace.graph.edges.IfThenScope(if_then_else, left_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, right_nodes)
control_flow[left_entry].append(dace.graph.edges.IfEntry(then_scope, left_entry))
control_flow[left_exit].append(dace.graph.edges.IfExit(then_scope, left_exit))
control_flow[right_exit].append(dace.graph.edges.IfExit(else_scope, right_exit))
if (len(dominators[right]) == 1):
control_flow[right_entry].append(dace.graph.edges.IfEntry(else_scope, right_entry))
has_else = True
else:
then_scope = dace.graph.edges.IfThenScope(if_then_else, right_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, left_nodes)
control_flow[right_entry].append(dace.graph.edges.IfEntry(then_scope, right_entry))
control_flow[right_exit].append(dace.graph.edges.IfExit(then_scope, right_exit))
control_flow[left_exit].append(dace.graph.edges.IfExit(else_scope, left_exit))
states_generated = set()
generated_edges = set()
self.generate_states(sdfg, 'sdfg', control_flow, global_stream, callsite_stream, set(states_topological), states_generated, generated_edges)
if (len(states_generated) != len(sdfg.nodes())):
raise RuntimeError('Not all states were generated in SDFG {}!\n Generated: {}\n Missing: {}'.format(sdfg.label, [s.label for s in states_generated], [s.label for s in (set(sdfg.nodes()) - states_generated)]))
shared_transients = sdfg.shared_transients()
deallocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in deallocated)):
self._dispatcher.dispatch_deallocate(sdfg, state, None, node, global_stream, callsite_stream)
deallocated.add(node.data)
if (sdfg.parent is None):
self.generate_footer(sdfg, global_stream, callsite_stream)
return (global_stream.getvalue(), callsite_stream.getvalue(), self._dispatcher.used_targets)
|
def generate_code(self, sdfg: SDFG, schedule: dtypes.ScheduleType, sdfg_id: str=) -> (str, str, Set[TargetCodeGenerator]):
" Generate frame code for a given SDFG, calling registered targets'\n code generation callbacks for them to generate their own code.\n @param sdfg: The SDFG to generate code for.\n @param schedule: The schedule the SDFG is currently located, or\n None if the SDFG is top-level.\n @param sdfg_id: An optional string id given to the SDFG label\n @return: A tuple of the generated global frame code, local frame\n code, and a set of targets that have been used in the\n generation of this SDFG.\n "
sdfg_label = (sdfg.name + sdfg_id)
global_stream = CodeIOStream()
callsite_stream = CodeIOStream()
_set_default_schedule_and_storage_types(sdfg, schedule)
if (sdfg.parent is None):
self.generate_header(sdfg, global_stream, callsite_stream)
if (sdfg.parent is not None):
symbols_available = sdfg.parent_sdfg.symbols_defined_at(sdfg)
else:
symbols_available = sdfg.constants
shared_transients = sdfg.shared_transients()
allocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in allocated)):
self._dispatcher.dispatch_allocate(sdfg, state, None, node, global_stream, callsite_stream)
self._dispatcher.dispatch_initialize(sdfg, state, None, node, global_stream, callsite_stream)
allocated.add(node.data)
(assigned, _) = sdfg.interstate_symbols()
for (isvarName, isvarType) in assigned.items():
if (isvarName in allocated):
continue
callsite_stream.write(('%s;\n' % isvarType.signature(with_types=True, name=isvarName)), sdfg)
for argnode in dtypes.deduplicate((sdfg.input_arrays() + sdfg.output_arrays())):
if argnode.desc(sdfg).transient:
continue
self._dispatcher.dispatch_initialize(sdfg, sdfg, None, argnode, global_stream, callsite_stream)
callsite_stream.write('\n', sdfg)
states_topological = list(sdfg.topological_sort(sdfg.start_state))
control_flow = {e: [] for e in sdfg.edges()}
if dace.config.Config.get_bool('optimizer', 'detect_control_flow'):
all_cycles = list(sdfg.find_cycles())
all_cycles = [sorted(c, key=(lambda x: states_topological.index(x))) for c in all_cycles]
starting_nodes = [c[0] for c in all_cycles]
starting_nodes = sorted(starting_nodes, key=(lambda x: states_topological.index(x)))
cycles_by_node = [[c for c in all_cycles if (c[0] == n)] for n in starting_nodes]
for cycles in cycles_by_node:
first_node = cycles[0][0]
last_node = cycles[0][(- 1)]
if (not first_node.is_empty()):
continue
if (not all([(c[(- 1)] == last_node) for c in cycles])):
continue
previous_edge = [e for e in sdfg.in_edges(first_node) if (e.src != last_node)]
if (len(previous_edge) != 1):
continue
previous_edge = previous_edge[0]
back_edge = sdfg.edges_between(last_node, first_node)
if (len(back_edge) != 1):
raise RuntimeError('Expected exactly one edge in cycle')
back_edge = back_edge[0]
internal_nodes = (functools.reduce((lambda a, b: (a | b)), [set(c) for c in cycles]) - {first_node})
exit_edge = [e for e in sdfg.out_edges(first_node) if (e.dst not in (internal_nodes | {first_node}))]
if (len(exit_edge) != 1):
continue
exit_edge = exit_edge[0]
entry_edge = [e for e in sdfg.out_edges(first_node) if (e != exit_edge)]
if (len(entry_edge) != 1):
continue
entry_edge = entry_edge[0]
if ((len(control_flow[entry_edge]) != 0) or (len(control_flow[back_edge]) != 0)):
continue
if ((len(control_flow[previous_edge]) == 1) and isinstance(control_flow[previous_edge][0], dace.graph.edges.LoopEntry)):
loop_parent = control_flow[previous_edge][0].scope
elif ((len(control_flow[exit_edge]) == 1) and isinstance(control_flow[exit_edge][0], dace.graph.edges.LoopBack)):
loop_parent = control_flow[exit_edge][0].scope
elif ((len(control_flow[exit_edge]) == 0) or (len(control_flow[previous_edge]) == 0)):
loop_parent = None
else:
continue
if (entry_edge == back_edge):
continue
if any([(len((set(c) - internal_nodes)) > 1) for c in cycles]):
continue
loop_scope = dace.graph.edges.LoopScope(internal_nodes)
if (((len(previous_edge.data.assignments) > 0) or (len(back_edge.data.assignments) > 0)) and ((len(control_flow[previous_edge]) == 0) or ((len(control_flow[previous_edge]) == 1) and (control_flow[previous_edge][0].scope == loop_parent)))):
control_flow[previous_edge].append(dace.graph.edges.LoopAssignment(loop_scope, previous_edge))
control_flow[entry_edge].append(dace.graph.edges.LoopEntry(loop_scope, entry_edge))
control_flow[exit_edge].append(dace.graph.edges.LoopExit(loop_scope, exit_edge))
control_flow[back_edge].append(dace.graph.edges.LoopBack(loop_scope, back_edge))
candidates = [n for n in states_topological if (sdfg.out_degree(n) == 2)]
for candidate in candidates:
dominators = nx.dominance.dominance_frontiers(sdfg.nx, candidate)
(left_entry, right_entry) = sdfg.out_edges(candidate)
if ((len(control_flow[left_entry]) > 0) or (len(control_flow[right_entry]) > 0)):
continue
(left, right) = (left_entry.dst, right_entry.dst)
dominator = (dominators[left] & dominators[right])
if (len(dominator) != 1):
continue
dominator = next(iter(dominator))
exit_edges = sdfg.in_edges(dominator)
if (len(exit_edges) != 2):
continue
(left_exit, right_exit) = exit_edges
if ((len(control_flow[left_exit]) > 0) or (len(control_flow[right_exit]) > 0)):
continue
left_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, left, dominator)
if (left_nodes is None):
continue
right_nodes = DaCeCodeGenerator.all_nodes_between(sdfg, right, dominator)
if (right_nodes is None):
continue
all_nodes = (left_nodes | right_nodes)
if (len((left_nodes & right_nodes)) > 0):
continue
if_then_else = dace.graph.edges.IfThenElse(candidate, dominator)
has_else = False
if (len(dominators[left]) == 1):
then_scope = dace.graph.edges.IfThenScope(if_then_else, left_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, right_nodes)
control_flow[left_entry].append(dace.graph.edges.IfEntry(then_scope, left_entry))
control_flow[left_exit].append(dace.graph.edges.IfExit(then_scope, left_exit))
control_flow[right_exit].append(dace.graph.edges.IfExit(else_scope, right_exit))
if (len(dominators[right]) == 1):
control_flow[right_entry].append(dace.graph.edges.IfEntry(else_scope, right_entry))
has_else = True
else:
then_scope = dace.graph.edges.IfThenScope(if_then_else, right_nodes)
else_scope = dace.graph.edges.IfElseScope(if_then_else, left_nodes)
control_flow[right_entry].append(dace.graph.edges.IfEntry(then_scope, right_entry))
control_flow[right_exit].append(dace.graph.edges.IfExit(then_scope, right_exit))
control_flow[left_exit].append(dace.graph.edges.IfExit(else_scope, left_exit))
states_generated = set()
generated_edges = set()
self.generate_states(sdfg, 'sdfg', control_flow, global_stream, callsite_stream, set(states_topological), states_generated, generated_edges)
if (len(states_generated) != len(sdfg.nodes())):
raise RuntimeError('Not all states were generated in SDFG {}!\n Generated: {}\n Missing: {}'.format(sdfg.label, [s.label for s in states_generated], [s.label for s in (set(sdfg.nodes()) - states_generated)]))
shared_transients = sdfg.shared_transients()
deallocated = set()
for state in sdfg.nodes():
for node in state.data_nodes():
if ((node.data in shared_transients) and (node.data not in deallocated)):
self._dispatcher.dispatch_deallocate(sdfg, state, None, node, global_stream, callsite_stream)
deallocated.add(node.data)
if (sdfg.parent is None):
self.generate_footer(sdfg, global_stream, callsite_stream)
return (global_stream.getvalue(), callsite_stream.getvalue(), self._dispatcher.used_targets)<|docstring|>Generate frame code for a given SDFG, calling registered targets'
code generation callbacks for them to generate their own code.
@param sdfg: The SDFG to generate code for.
@param schedule: The schedule the SDFG is currently located, or
None if the SDFG is top-level.
@param sdfg_id: An optional string id given to the SDFG label
@return: A tuple of the generated global frame code, local frame
code, and a set of targets that have been used in the
generation of this SDFG.<|endoftext|>
|
ba2718c18ff92873c39b5dd365d6b17593b8442f18af27bff07f55552b76b371
|
def __init__(self, sparql_query, sparql_service_url, chart=None, **kwargs):
'\n Constructs all the necessary attributes for the vizKG object\n\n Parameters:\n sparql_query (string): The SPARQL query to retrieve.\n sparql_service_url (string): The SPARQL endpoint URL.\n chart (string): Type of visualization\n '
self.sparql_query = sparql_query
self.sparql_service_url = sparql_service_url
self.chart = set_chart(chart)
self.kwargs = kwargs
self.__data = set_dataframe(sparql_query, sparql_service_url)
self.__candidate_visualization = self.__find_candidate()
self.dataframe = self.__data
self.candidate_visualization = self.__candidate_visualization
|
Constructs all the necessary attributes for the vizKG object
Parameters:
sparql_query (string): The SPARQL query to retrieve.
sparql_service_url (string): The SPARQL endpoint URL.
chart (string): Type of visualization
|
VizKG/visualize.py
|
__init__
|
soblinger/vizkg
| 12
|
python
|
def __init__(self, sparql_query, sparql_service_url, chart=None, **kwargs):
'\n Constructs all the necessary attributes for the vizKG object\n\n Parameters:\n sparql_query (string): The SPARQL query to retrieve.\n sparql_service_url (string): The SPARQL endpoint URL.\n chart (string): Type of visualization\n '
self.sparql_query = sparql_query
self.sparql_service_url = sparql_service_url
self.chart = set_chart(chart)
self.kwargs = kwargs
self.__data = set_dataframe(sparql_query, sparql_service_url)
self.__candidate_visualization = self.__find_candidate()
self.dataframe = self.__data
self.candidate_visualization = self.__candidate_visualization
|
def __init__(self, sparql_query, sparql_service_url, chart=None, **kwargs):
'\n Constructs all the necessary attributes for the vizKG object\n\n Parameters:\n sparql_query (string): The SPARQL query to retrieve.\n sparql_service_url (string): The SPARQL endpoint URL.\n chart (string): Type of visualization\n '
self.sparql_query = sparql_query
self.sparql_service_url = sparql_service_url
self.chart = set_chart(chart)
self.kwargs = kwargs
self.__data = set_dataframe(sparql_query, sparql_service_url)
self.__candidate_visualization = self.__find_candidate()
self.dataframe = self.__data
self.candidate_visualization = self.__candidate_visualization<|docstring|>Constructs all the necessary attributes for the vizKG object
Parameters:
sparql_query (string): The SPARQL query to retrieve.
sparql_service_url (string): The SPARQL endpoint URL.
chart (string): Type of visualization<|endoftext|>
|
3759a223d540bd2d6fb1fc20a3c1a6151c705f0d62924cf263d32d17456d6678
|
def plot(self):
'\n Plot visualization with suitable corresponding chart\n\n '
chart_list = chartdict.keys()
figure = None
if (len(self.__data) != 0):
if (self.chart not in chart_list):
if (len(self.__candidate_visualization) > 1):
print(f'You haven’t selected the chart type for your query result visualization.')
print(f'''Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
''')
self.__plot_randomize(self.__candidate_visualization)
else:
figure = chartdict['table'](self.__data, self.kwargs)
figure.plot()
elif (self.chart in self.__candidate_visualization):
figure = chartdict[self.chart](self.__data, self.kwargs)
figure.plot()
else:
print(f'''Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
''')
else:
print('No matching records found')
|
Plot visualization with suitable corresponding chart
|
VizKG/visualize.py
|
plot
|
soblinger/vizkg
| 12
|
python
|
def plot(self):
'\n \n\n '
chart_list = chartdict.keys()
figure = None
if (len(self.__data) != 0):
if (self.chart not in chart_list):
if (len(self.__candidate_visualization) > 1):
print(f'You haven’t selected the chart type for your query result visualization.')
print(f'Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
')
self.__plot_randomize(self.__candidate_visualization)
else:
figure = chartdict['table'](self.__data, self.kwargs)
figure.plot()
elif (self.chart in self.__candidate_visualization):
figure = chartdict[self.chart](self.__data, self.kwargs)
figure.plot()
else:
print(f'Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
')
else:
print('No matching records found')
|
def plot(self):
'\n \n\n '
chart_list = chartdict.keys()
figure = None
if (len(self.__data) != 0):
if (self.chart not in chart_list):
if (len(self.__candidate_visualization) > 1):
print(f'You haven’t selected the chart type for your query result visualization.')
print(f'Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
')
self.__plot_randomize(self.__candidate_visualization)
else:
figure = chartdict['table'](self.__data, self.kwargs)
figure.plot()
elif (self.chart in self.__candidate_visualization):
figure = chartdict[self.chart](self.__data, self.kwargs)
figure.plot()
else:
print(f'Based on your query result data, we suggest to choose one of the following chart type: {self.__candidate_visualization}
')
else:
print('No matching records found')<|docstring|>Plot visualization with suitable corresponding chart<|endoftext|>
|
d4f71526ab88cab9f37f4a89109739b48fe38b203011d2c6796899a9ab4cc600
|
def __find_candidate(self):
'\n Find candidate of visualization\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
chart_list = list(chartdict.keys())
candidate = []
for (idx, name) in enumerate(chart_list):
check = chartdict[name.lower()](self.__data, self.kwargs)
if check.promote_to_candidate():
candidate.append(name)
return candidate
|
Find candidate of visualization
Returns:
(list) candidate: List of recommendation chart name
|
VizKG/visualize.py
|
__find_candidate
|
soblinger/vizkg
| 12
|
python
|
def __find_candidate(self):
'\n Find candidate of visualization\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
chart_list = list(chartdict.keys())
candidate = []
for (idx, name) in enumerate(chart_list):
check = chartdict[name.lower()](self.__data, self.kwargs)
if check.promote_to_candidate():
candidate.append(name)
return candidate
|
def __find_candidate(self):
'\n Find candidate of visualization\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
chart_list = list(chartdict.keys())
candidate = []
for (idx, name) in enumerate(chart_list):
check = chartdict[name.lower()](self.__data, self.kwargs)
if check.promote_to_candidate():
candidate.append(name)
return candidate<|docstring|>Find candidate of visualization
Returns:
(list) candidate: List of recommendation chart name<|endoftext|>
|
2e261cd399dd4efb08aa8e779ade350e6b104863c735d180468ae1544e513abf
|
def __plot_randomize(self, candidate_visualization):
'\n Plot two of recommendation chart chart\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
list_of_random_items = random.sample(candidate_visualization, 2)
print(f'We show below two of them {tuple(list_of_random_items)} as illustrations: ')
for (idx, name) in enumerate(list_of_random_items):
figure = chartdict[name.lower()](self.__data, self.kwargs)
figure.plot()
|
Plot two of recommendation chart chart
Returns:
(list) candidate: List of recommendation chart name
|
VizKG/visualize.py
|
__plot_randomize
|
soblinger/vizkg
| 12
|
python
|
def __plot_randomize(self, candidate_visualization):
'\n Plot two of recommendation chart chart\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
list_of_random_items = random.sample(candidate_visualization, 2)
print(f'We show below two of them {tuple(list_of_random_items)} as illustrations: ')
for (idx, name) in enumerate(list_of_random_items):
figure = chartdict[name.lower()](self.__data, self.kwargs)
figure.plot()
|
def __plot_randomize(self, candidate_visualization):
'\n Plot two of recommendation chart chart\n\n Returns:\n (list) candidate: List of recommendation chart name \n '
list_of_random_items = random.sample(candidate_visualization, 2)
print(f'We show below two of them {tuple(list_of_random_items)} as illustrations: ')
for (idx, name) in enumerate(list_of_random_items):
figure = chartdict[name.lower()](self.__data, self.kwargs)
figure.plot()<|docstring|>Plot two of recommendation chart chart
Returns:
(list) candidate: List of recommendation chart name<|endoftext|>
|
366b633c6c9fcaab774e54910faa2f260326286b2afaa4a14779d3fe219f1747
|
def backwards(self, orm):
'Write your backwards methods here.'
|
Write your backwards methods here.
|
src/oscar/apps/catalogue/south_migrations/0026_determine_product_structure.py
|
backwards
|
ashish12/django-oscar
| 2
|
python
|
def backwards(self, orm):
|
def backwards(self, orm):
<|docstring|>Write your backwards methods here.<|endoftext|>
|
ba7f7c178e3043ff93297a56a646417d5f527aeb6bbbdb62fad317d3e681aee4
|
def validDate(date_string: str) -> bool:
'\n Validates stringtype dates of type `dd/mm/yyyy`, `dd-mm-yyyy` or `dd.mm.yyyy` from\n years 1900-9999. Leap year support included.\n\n Parameters\n ----------\n date_string : str\n Date to be validated\n\n Returns\n ----------\n boolean\n Whether the date is valid or not\n\n Examples\n ---------\n >>> validDate("11/02/1996")\n True\n >>> validDate("29/02/2016")\n True\n >>> validDate("43/01/1996")\n False\n '
if re.match(((((('^(?:(?:31(\\/|-|\\.)(?:0?[13578]|1[02]))\\1' + '|(?:(?:29|30)(\\/|-|\\.)(?:0?[13-9]|1[0-2])\\2') + '))(?:(?:1[9]..|2[0][0-4].))$|^(?:29(\\/|-|\\.)0?2\\3') + '(?:(?:(?:1[6-9]|[2-9]\\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]') + '|[3579][26])00))))$|^(?:0?[1-9]|1\\d|2[0-8])(\\/|-|\\.)(?:(?:0?[1-9])|(?:1[0-2]))\\4') + '(?:(?:1[9]..|2[0][0-4].))$'), date_string, flags=0):
return True
else:
return False
|
Validates stringtype dates of type `dd/mm/yyyy`, `dd-mm-yyyy` or `dd.mm.yyyy` from
years 1900-9999. Leap year support included.
Parameters
----------
date_string : str
Date to be validated
Returns
----------
boolean
Whether the date is valid or not
Examples
---------
>>> validDate("11/02/1996")
True
>>> validDate("29/02/2016")
True
>>> validDate("43/01/1996")
False
|
codonPython/validation/dateValidator.py
|
validDate
|
NatashaChetwynd/codonPython
| 7
|
python
|
def validDate(date_string: str) -> bool:
'\n Validates stringtype dates of type `dd/mm/yyyy`, `dd-mm-yyyy` or `dd.mm.yyyy` from\n years 1900-9999. Leap year support included.\n\n Parameters\n ----------\n date_string : str\n Date to be validated\n\n Returns\n ----------\n boolean\n Whether the date is valid or not\n\n Examples\n ---------\n >>> validDate("11/02/1996")\n True\n >>> validDate("29/02/2016")\n True\n >>> validDate("43/01/1996")\n False\n '
if re.match(((((('^(?:(?:31(\\/|-|\\.)(?:0?[13578]|1[02]))\\1' + '|(?:(?:29|30)(\\/|-|\\.)(?:0?[13-9]|1[0-2])\\2') + '))(?:(?:1[9]..|2[0][0-4].))$|^(?:29(\\/|-|\\.)0?2\\3') + '(?:(?:(?:1[6-9]|[2-9]\\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]') + '|[3579][26])00))))$|^(?:0?[1-9]|1\\d|2[0-8])(\\/|-|\\.)(?:(?:0?[1-9])|(?:1[0-2]))\\4') + '(?:(?:1[9]..|2[0][0-4].))$'), date_string, flags=0):
return True
else:
return False
|
def validDate(date_string: str) -> bool:
'\n Validates stringtype dates of type `dd/mm/yyyy`, `dd-mm-yyyy` or `dd.mm.yyyy` from\n years 1900-9999. Leap year support included.\n\n Parameters\n ----------\n date_string : str\n Date to be validated\n\n Returns\n ----------\n boolean\n Whether the date is valid or not\n\n Examples\n ---------\n >>> validDate("11/02/1996")\n True\n >>> validDate("29/02/2016")\n True\n >>> validDate("43/01/1996")\n False\n '
if re.match(((((('^(?:(?:31(\\/|-|\\.)(?:0?[13578]|1[02]))\\1' + '|(?:(?:29|30)(\\/|-|\\.)(?:0?[13-9]|1[0-2])\\2') + '))(?:(?:1[9]..|2[0][0-4].))$|^(?:29(\\/|-|\\.)0?2\\3') + '(?:(?:(?:1[6-9]|[2-9]\\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]') + '|[3579][26])00))))$|^(?:0?[1-9]|1\\d|2[0-8])(\\/|-|\\.)(?:(?:0?[1-9])|(?:1[0-2]))\\4') + '(?:(?:1[9]..|2[0][0-4].))$'), date_string, flags=0):
return True
else:
return False<|docstring|>Validates stringtype dates of type `dd/mm/yyyy`, `dd-mm-yyyy` or `dd.mm.yyyy` from
years 1900-9999. Leap year support included.
Parameters
----------
date_string : str
Date to be validated
Returns
----------
boolean
Whether the date is valid or not
Examples
---------
>>> validDate("11/02/1996")
True
>>> validDate("29/02/2016")
True
>>> validDate("43/01/1996")
False<|endoftext|>
|
1672ee1aa02db2c77c719e00f96bf8b39c804b8ac8aba947755c5452c9c3ab05
|
def __init__(self, source_root_config):
'Create an object for querying source roots via patterns in a trie.\n\n :param source_root_config: The SourceRootConfig for the source root patterns to query against.\n\n Non-test code should not instantiate directly. See SourceRootConfig.get_source_roots().\n '
self._trie = source_root_config.create_trie()
self._options = source_root_config.get_options()
|
Create an object for querying source roots via patterns in a trie.
:param source_root_config: The SourceRootConfig for the source root patterns to query against.
Non-test code should not instantiate directly. See SourceRootConfig.get_source_roots().
|
src/python/pants/source/source_root.py
|
__init__
|
dturner-tw/pants
| 0
|
python
|
def __init__(self, source_root_config):
'Create an object for querying source roots via patterns in a trie.\n\n :param source_root_config: The SourceRootConfig for the source root patterns to query against.\n\n Non-test code should not instantiate directly. See SourceRootConfig.get_source_roots().\n '
self._trie = source_root_config.create_trie()
self._options = source_root_config.get_options()
|
def __init__(self, source_root_config):
'Create an object for querying source roots via patterns in a trie.\n\n :param source_root_config: The SourceRootConfig for the source root patterns to query against.\n\n Non-test code should not instantiate directly. See SourceRootConfig.get_source_roots().\n '
self._trie = source_root_config.create_trie()
self._options = source_root_config.get_options()<|docstring|>Create an object for querying source roots via patterns in a trie.
:param source_root_config: The SourceRootConfig for the source root patterns to query against.
Non-test code should not instantiate directly. See SourceRootConfig.get_source_roots().<|endoftext|>
|
0e59dafd712be677d75ec589febbb11efdff7013a9135732719dec7c04361af1
|
def add_source_root(self, path, langs=tuple()):
'Add the specified fixed source root.\n\n Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with\n unknown structure. Tests should prefer to use dirs that match our source root patterns\n instead of explicitly setting source roots here.\n '
if os.path.isabs(path):
path = os.path.relpath(path, get_buildroot())
self._trie.add_fixed(path, langs)
|
Add the specified fixed source root.
Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with
unknown structure. Tests should prefer to use dirs that match our source root patterns
instead of explicitly setting source roots here.
|
src/python/pants/source/source_root.py
|
add_source_root
|
dturner-tw/pants
| 0
|
python
|
def add_source_root(self, path, langs=tuple()):
'Add the specified fixed source root.\n\n Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with\n unknown structure. Tests should prefer to use dirs that match our source root patterns\n instead of explicitly setting source roots here.\n '
if os.path.isabs(path):
path = os.path.relpath(path, get_buildroot())
self._trie.add_fixed(path, langs)
|
def add_source_root(self, path, langs=tuple()):
'Add the specified fixed source root.\n\n Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with\n unknown structure. Tests should prefer to use dirs that match our source root patterns\n instead of explicitly setting source roots here.\n '
if os.path.isabs(path):
path = os.path.relpath(path, get_buildroot())
self._trie.add_fixed(path, langs)<|docstring|>Add the specified fixed source root.
Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with
unknown structure. Tests should prefer to use dirs that match our source root patterns
instead of explicitly setting source roots here.<|endoftext|>
|
a451281936420871480428cfd0968d02a6be9377f16b8e7b656a89d591b94903
|
def find(self, target):
    """Return the source root for the given target, or None.

    :param target: Find the source root for this target.
    :return: A SourceRoot instance.
    """
    spec_path = target.address.spec_path
    return self.find_by_path(spec_path)
|
Find the source root for the given target, or None.
:param target: Find the source root for this target.
:return: A SourceRoot instance.
|
src/python/pants/source/source_root.py
|
find
|
dturner-tw/pants
| 0
|
python
|
def find(self, target):
'Find the source root for the given target, or None.\n\n :param target: Find the source root for this target.\n :return: A SourceRoot instance.\n '
return self.find_by_path(target.address.spec_path)
|
def find(self, target):
'Find the source root for the given target, or None.\n\n :param target: Find the source root for this target.\n :return: A SourceRoot instance.\n '
return self.find_by_path(target.address.spec_path)<|docstring|>Find the source root for the given target, or None.
:param target: Find the source root for this target.
:return: A SourceRoot instance.<|endoftext|>
|
46ff216ec2a986488808bb134afb31e444600fe366a9cfd4bbc5021307553ee7
|
def find_by_path(self, path):
    """Return the source root for the given path, or None.

    :param path: Find the source root for this path.
    :return: A SourceRoot instance, or None if the path is not located under
             a source root and `unmatched==fail`.
    """
    # Normalize absolute paths to be relative to the build root.
    relpath = os.path.relpath(path, get_buildroot()) if os.path.isabs(path) else path
    match = self._trie.find(relpath)
    if match:
        return match
    # No trie entry covers this path: the `unmatched` option decides what to do.
    if self._options.unmatched == 'create':
        return SourceRoot(relpath, [])
    return None
|
Find the source root for the given path, or None.
:param path: Find the source root for this path.
:return: A SourceRoot instance, or None if the path is not located under a source root
and `unmatched==fail`.
|
src/python/pants/source/source_root.py
|
find_by_path
|
dturner-tw/pants
| 0
|
python
|
def find_by_path(self, path):
'Find the source root for the given path, or None.\n\n :param path: Find the source root for this path.\n :return: A SourceRoot instance, or None if the path is not located under a source root\n and `unmatched==fail`.\n '
if os.path.isabs(path):
path = os.path.relpath(path, get_buildroot())
matched = self._trie.find(path)
if matched:
return matched
elif (self._options.unmatched == 'fail'):
return None
elif (self._options.unmatched == 'create'):
return SourceRoot(path, [])
|
def find_by_path(self, path):
'Find the source root for the given path, or None.\n\n :param path: Find the source root for this path.\n :return: A SourceRoot instance, or None if the path is not located under a source root\n and `unmatched==fail`.\n '
if os.path.isabs(path):
path = os.path.relpath(path, get_buildroot())
matched = self._trie.find(path)
if matched:
return matched
elif (self._options.unmatched == 'fail'):
return None
elif (self._options.unmatched == 'create'):
return SourceRoot(path, [])<|docstring|>Find the source root for the given path, or None.
:param path: Find the source root for this path.
:return: A SourceRoot instance, or None if the path is not located under a source root
and `unmatched==fail`.<|endoftext|>
|
3e6f7dc557f2f0b075fcfb033d314b7f6d40642c17c188d1bc256a074d467d0f
|
def all_roots(self):
    """Yield all known source roots.

    Returns a generator over (source root, list of langs) pairs.

    Note: Requires a directory walk to match actual directories against patterns.
    However we don't descend into source roots, once found, so this should be fast in practice.
    Note: Does not follow symlinks.
    """
    buildroot = get_buildroot()
    # Never walk into the VCS metadata dir or pants' own output dirs.
    pruned = {'.git'}
    pruned.update(os.path.relpath(self._options[key], buildroot)
                  for key in ('pants_workdir', 'pants_supportdir', 'pants_distdir'))
    for dirpath, dirnames, _ in os.walk(buildroot, topdown=True):
        relpath = os.path.relpath(dirpath, buildroot)
        if relpath in pruned:
            del dirnames[:]  # Prune: don't descend into ignored dirs.
            continue
        match = self._trie.find(relpath)
        if match:
            yield match
            del dirnames[:]  # A source root never contains another; stop descending.
|
Return all known source roots.
Returns a generator over (source root, list of langs) pairs.
Note: Requires a directory walk to match actual directories against patterns.
However we don't descend into source roots, once found, so this should be fast in practice.
Note: Does not follow symlinks.
|
src/python/pants/source/source_root.py
|
all_roots
|
dturner-tw/pants
| 0
|
python
|
def all_roots(self):
"Return all known source roots.\n\n Returns a generator over (source root, list of langs) pairs.\n\n Note: Requires a directory walk to match actual directories against patterns.\n However we don't descend into source roots, once found, so this should be fast in practice.\n Note: Does not follow symlinks.\n "
buildroot = get_buildroot()
ignore = {'.git'}.union({os.path.relpath(self._options[k], buildroot) for k in ['pants_workdir', 'pants_supportdir', 'pants_distdir']})
for (dirpath, dirnames, _) in os.walk(buildroot, topdown=True):
relpath = os.path.relpath(dirpath, buildroot)
if (relpath in ignore):
del dirnames[:]
else:
match = self._trie.find(relpath)
if match:
(yield match)
del dirnames[:]
|
def all_roots(self):
"Return all known source roots.\n\n Returns a generator over (source root, list of langs) pairs.\n\n Note: Requires a directory walk to match actual directories against patterns.\n However we don't descend into source roots, once found, so this should be fast in practice.\n Note: Does not follow symlinks.\n "
buildroot = get_buildroot()
ignore = {'.git'}.union({os.path.relpath(self._options[k], buildroot) for k in ['pants_workdir', 'pants_supportdir', 'pants_distdir']})
for (dirpath, dirnames, _) in os.walk(buildroot, topdown=True):
relpath = os.path.relpath(dirpath, buildroot)
if (relpath in ignore):
del dirnames[:]
else:
match = self._trie.find(relpath)
if match:
(yield match)
del dirnames[:]<|docstring|>Return all known source roots.
Returns a generator over (source root, list of langs) pairs.
Note: Requires a directory walk to match actual directories against patterns.
However we don't descend into source roots, once found, so this should be fast in practice.
Note: Does not follow symlinks.<|endoftext|>
|
33c193d0cc7b921567df09f87f65615504cb697e4ae76c377d35c1e721a0ad83
|
def create_trie(self):
    """Create a trie of source root patterns from options."""
    options = self.get_options()
    trie = SourceRootTrie(options.lang_canonicalizations)
    # Pattern-based roots first (source then test), preserving option order.
    all_patterns = list(options.source_root_patterns or []) + list(options.test_root_patterns or [])
    for pattern in all_patterns:
        trie.add_pattern(pattern)
    # Then the explicitly-fixed roots.
    for path, langs in (options.source_roots or {}).items():
        trie.add_fixed(path, langs)
    for path, langs in (options.test_roots or {}).items():
        trie.add_fixed(path, langs)
    return trie
|
Create a trie of source root patterns from options.
|
src/python/pants/source/source_root.py
|
create_trie
|
dturner-tw/pants
| 0
|
python
|
def create_trie(self):
options = self.get_options()
trie = SourceRootTrie(options.lang_canonicalizations)
for pattern in (options.source_root_patterns or []):
trie.add_pattern(pattern)
for pattern in (options.test_root_patterns or []):
trie.add_pattern(pattern)
for (path, langs) in (options.source_roots or {}).items():
trie.add_fixed(path, langs)
for (path, langs) in (options.test_roots or {}).items():
trie.add_fixed(path, langs)
return trie
|
def create_trie(self):
options = self.get_options()
trie = SourceRootTrie(options.lang_canonicalizations)
for pattern in (options.source_root_patterns or []):
trie.add_pattern(pattern)
for pattern in (options.test_root_patterns or []):
trie.add_pattern(pattern)
for (path, langs) in (options.source_roots or {}).items():
trie.add_fixed(path, langs)
for (path, langs) in (options.test_roots or {}).items():
trie.add_fixed(path, langs)
return trie<|docstring|>Create a trie of source root patterns from options.<|endoftext|>
|
e9a6d4d97459176137227c72b37b433f71d65fed1dbf6be89535fb422bd368f7
|
def add_pattern(self, pattern):
    """Register a source-root pattern with the trie (no langs attached)."""
    no_langs = tuple()
    self._do_add_pattern(pattern, no_langs)
|
Add a pattern to the trie.
|
src/python/pants/source/source_root.py
|
add_pattern
|
dturner-tw/pants
| 0
|
python
|
def add_pattern(self, pattern):
self._do_add_pattern(pattern, tuple())
|
def add_pattern(self, pattern):
self._do_add_pattern(pattern, tuple())<|docstring|>Add a pattern to the trie.<|endoftext|>
|
c5f18cfb37cbc089298c38270b39fa711947c131fb6ab374a6cf193ab28dc617
|
def add_fixed(self, path, langs=None):
    """Add a fixed source root to the trie.

    :param path: The fixed path, relative to the build root.
    :param langs: Optional iterable of language names associated with this root.
    """
    # Bug fix: the previous implementation called tuple(langs) unconditionally,
    # which raised TypeError when langs was left at its default of None.
    fixed_langs = tuple(langs) if langs is not None else tuple()
    # '^' anchors the pattern at the build root.
    self._do_add_pattern(os.path.join('^', path), fixed_langs)
|
Add a fixed source root to the trie.
|
src/python/pants/source/source_root.py
|
add_fixed
|
dturner-tw/pants
| 0
|
python
|
def add_fixed(self, path, langs=None):
self._do_add_pattern(os.path.join('^', path), tuple(langs))
|
def add_fixed(self, path, langs=None):
self._do_add_pattern(os.path.join('^', path), tuple(langs))<|docstring|>Add a fixed source root to the trie.<|endoftext|>
|
45bd112f983e020aa9337bc7ea37df8131914df019773e74f999a250905eba7e
|
def find(self, path):
    """Return the SourceRoot matching the given path, or None if no pattern matches."""
    keys = ['^'] + path.split(os.path.sep)
    num_keys = len(keys)
    # Attempt a match starting at every offset into the key sequence.
    for start in range(num_keys):
        node = self._root
        langs = set()
        pos = start
        while pos < num_keys:
            nxt = node.get_child(keys[pos], langs)
            if nxt is None:
                break
            node = nxt
            pos += 1
        if node.is_terminal:
            # keys[0] is the synthetic '^' anchor; drop it from the joined path.
            return SourceRoot(os.path.join(*keys[1:pos]), self._canonicalize_langs(langs))
    return None
|
Find the source root for the given path.
|
src/python/pants/source/source_root.py
|
find
|
dturner-tw/pants
| 0
|
python
|
def find(self, path):
keys = (['^'] + path.split(os.path.sep))
for i in range(len(keys)):
node = self._root
langs = set()
j = i
while (j < len(keys)):
child = node.get_child(keys[j], langs)
if (child is None):
break
else:
node = child
j += 1
if node.is_terminal:
return SourceRoot(os.path.join(*keys[1:j]), self._canonicalize_langs(langs))
return None
|
def find(self, path):
keys = (['^'] + path.split(os.path.sep))
for i in range(len(keys)):
node = self._root
langs = set()
j = i
while (j < len(keys)):
child = node.get_child(keys[j], langs)
if (child is None):
break
else:
node = child
j += 1
if node.is_terminal:
return SourceRoot(os.path.join(*keys[1:j]), self._canonicalize_langs(langs))
return None<|docstring|>Find the source root for the given path.<|endoftext|>
|
9a1b6a96fcb251d272c086f5a333d3ada7611c3658a3f6555f4e21ed3030ff37
|
def get_child(self, key, langs):
    """Return the child node for the given key, or None if no such child.

    :param key: The child to return.
    :param langs: An output parameter which we update with any langs
                  associated with the child.
    """
    child = self.children.get(key)
    if child:
        langs.update(child.langs)
        return child
    # Fall back to a wildcard child; the key itself then names the lang.
    wildcard = self.children.get('*')
    if wildcard:
        langs.add(key)
    return wildcard
|
Return the child node for the given key, or None if no such child.
:param key: The child to return.
:param langs: An output parameter which we update with any langs associated with the child.
|
src/python/pants/source/source_root.py
|
get_child
|
dturner-tw/pants
| 0
|
python
|
def get_child(self, key, langs):
'Return the child node for the given key, or None if no such child.\n\n :param key: The child to return.\n :param langs: An output parameter which we update with any langs associated with the child.\n '
ret = self.children.get(key)
if ret:
langs.update(ret.langs)
else:
ret = self.children.get('*')
if ret:
langs.add(key)
return ret
|
def get_child(self, key, langs):
'Return the child node for the given key, or None if no such child.\n\n :param key: The child to return.\n :param langs: An output parameter which we update with any langs associated with the child.\n '
ret = self.children.get(key)
if ret:
langs.update(ret.langs)
else:
ret = self.children.get('*')
if ret:
langs.add(key)
return ret<|docstring|>Return the child node for the given key, or None if no such child.
:param key: The child to return.
:param langs: An output parameter which we update with any langs associated with the child.<|endoftext|>
|
98486cc085deacb1cd5029f3eb899d9128b1dbaa260ce4d32306dd2fb9444450
|
def __init__(self, consts, Nij, state=None):
    """Initialize the compartment indices and the state vector using the calling modules numerical libs."""
    reimport_numerical_libs('model.state.buckyState.__init__')
    self.En = consts['En']
    self.Im = consts['Im']
    self.Rhn = consts['Rhn']
    self.consts = consts
    # Number of sub-bins per compartment; dict insertion order fixes the index layout.
    bin_counts = {
        'S': 1, 'R': 1, 'D': 1, 'incH': 1, 'incC': 1,
        'I': self.Im, 'Ic': self.Im, 'Ia': self.Im,
        'E': self.En, 'Rh': self.Rhn,
    }
    indices = {}
    offset = 0
    for name, nbins in bin_counts.items():
        indices[name] = slice(offset, offset + nbins)
        offset += nbins
    # Aggregate index sets spanning groups of compartments.
    indices['N'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for k, v in indices.items() if 'inc' not in k])
    indices['Itot'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for k, v in indices.items() if k in ('I', 'Ia', 'Ic')])
    indices['H'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for k, v in indices.items() if k in ('Ic', 'Rh')])
    self.indices = indices
    self.n_compartments = xp.to_cpu(sum(bin_counts.values())).item()
    self.n_age_grps, self.n_nodes = Nij.shape
    # Default to an all-zero state when none is supplied.
    self.state = xp.zeros(self.state_shape) if state is None else state
|
Initialize the compartment indices and the state vector using the calling modules numerical libs
|
bucky/model/state.py
|
__init__
|
lshin-apl/bucky
| 0
|
python
|
def __init__(self, consts, Nij, state=None):
reimport_numerical_libs('model.state.buckyState.__init__')
self.En = consts['En']
self.Im = consts['Im']
self.Rhn = consts['Rhn']
self.consts = consts
bin_counts = {}
for name in ('S', 'R', 'D', 'incH', 'incC'):
bin_counts[name] = 1
for name in ('I', 'Ic', 'Ia'):
bin_counts[name] = self.Im
bin_counts['E'] = self.En
bin_counts['Rh'] = self.Rhn
indices = {}
current_index = 0
for (name, nbins) in bin_counts.items():
indices[name] = slice(current_index, (current_index + nbins))
current_index = (current_index + nbins)
indices['N'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if ('inc' not in k)])
indices['Itot'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if (k in ('I', 'Ia', 'Ic'))])
indices['H'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if (k in ('Ic', 'Rh'))])
self.indices = indices
self.n_compartments = xp.to_cpu(sum([n for n in bin_counts.values()])).item()
(self.n_age_grps, self.n_nodes) = Nij.shape
if (state is None):
self.state = xp.zeros(self.state_shape)
else:
self.state = state
|
def __init__(self, consts, Nij, state=None):
reimport_numerical_libs('model.state.buckyState.__init__')
self.En = consts['En']
self.Im = consts['Im']
self.Rhn = consts['Rhn']
self.consts = consts
bin_counts = {}
for name in ('S', 'R', 'D', 'incH', 'incC'):
bin_counts[name] = 1
for name in ('I', 'Ic', 'Ia'):
bin_counts[name] = self.Im
bin_counts['E'] = self.En
bin_counts['Rh'] = self.Rhn
indices = {}
current_index = 0
for (name, nbins) in bin_counts.items():
indices[name] = slice(current_index, (current_index + nbins))
current_index = (current_index + nbins)
indices['N'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if ('inc' not in k)])
indices['Itot'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if (k in ('I', 'Ia', 'Ic'))])
indices['H'] = xp.concatenate([xp.r_[slice_to_cpu(v)] for (k, v) in indices.items() if (k in ('Ic', 'Rh'))])
self.indices = indices
self.n_compartments = xp.to_cpu(sum([n for n in bin_counts.values()])).item()
(self.n_age_grps, self.n_nodes) = Nij.shape
if (state is None):
self.state = xp.zeros(self.state_shape)
else:
self.state = state<|docstring|>Initialize the compartment indices and the state vector using the calling modules numerical libs<|endoftext|>
|
94839afd59bb2c599b236758676465dc89fc032e23c334dcd3cbef6a5df23e09
|
def zeros_like(self):
    """Return a mostly shallow copy of self but with a zeroed out self.state"""
    clone = copy.copy(self)
    clone.state = xp.zeros_like(self.state)
    return clone
|
Return a mostly shallow copy of self but with a zeroed out self.state
|
bucky/model/state.py
|
zeros_like
|
lshin-apl/bucky
| 0
|
python
|
def zeros_like(self):
ret = copy.copy(self)
ret.state = xp.zeros_like(self.state)
return ret
|
def zeros_like(self):
ret = copy.copy(self)
ret.state = xp.zeros_like(self.state)
return ret<|docstring|>Return a mostly shallow copy of self but with a zeroed out self.state<|endoftext|>
|
cc498a8e142de3bf25645d9a9e57efff0b24a940234fd886a64b447a03dc7de2
|
def __getattribute__(self, attr):
    "Allow for . access to the compartment indices, otherwise return the 'normal' attribute."
    # Suppressing AttributeError covers both the case where `indices` has not
    # been set yet (e.g. during __init__) and any failure while slicing state;
    # either way we fall through to default attribute lookup below.
    with contextlib.suppress(AttributeError):
        if (attr in super().__getattribute__('indices')):
            out = self.state[self.indices[attr]]
            # Drop a singleton leading (bin) axis so one-bin compartments read naturally.
            if (out.shape[0] == 1):
                out = xp.squeeze(out, axis=0)
            return out
    return super().__getattribute__(attr)
|
Allow for . access to the compartment indices, otherwise return the 'normal' attribute.
|
bucky/model/state.py
|
__getattribute__
|
lshin-apl/bucky
| 0
|
python
|
def __getattribute__(self, attr):
with contextlib.suppress(AttributeError):
if (attr in super().__getattribute__('indices')):
out = self.state[self.indices[attr]]
if (out.shape[0] == 1):
out = xp.squeeze(out, axis=0)
return out
return super().__getattribute__(attr)
|
def __getattribute__(self, attr):
with contextlib.suppress(AttributeError):
if (attr in super().__getattribute__('indices')):
out = self.state[self.indices[attr]]
if (out.shape[0] == 1):
out = xp.squeeze(out, axis=0)
return out
return super().__getattribute__(attr)<|docstring|>Allow for . access to the compartment indices, otherwise return the 'normal' attribute.<|endoftext|>
|
3a42c1b201ea670fc3ea2b579797096fbac918a3a9f0110eace9a6aeee4a248d
|
def __setattr__(self, attr, x):
    'Allow setting of compartments using . notation, otherwise default to normal attribute behavior.'
    try:
        if (attr in super().__getattribute__('indices')):
            # Compartment name: write into the corresponding slice of the state array.
            self.state[self.indices[attr]] = x
        else:
            super().__setattr__(attr, x)
    except AttributeError:
        # `indices` (or `state`) not set up yet, e.g. during __init__;
        # fall back to normal attribute assignment.
        super().__setattr__(attr, x)
|
Allow setting of compartments using . notation, otherwise default to normal attribute behavior.
|
bucky/model/state.py
|
__setattr__
|
lshin-apl/bucky
| 0
|
python
|
def __setattr__(self, attr, x):
try:
if (attr in super().__getattribute__('indices')):
self.state[self.indices[attr]] = x
else:
super().__setattr__(attr, x)
except AttributeError:
super().__setattr__(attr, x)
|
def __setattr__(self, attr, x):
try:
if (attr in super().__getattribute__('indices')):
self.state[self.indices[attr]] = x
else:
super().__setattr__(attr, x)
except AttributeError:
super().__setattr__(attr, x)<|docstring|>Allow setting of compartments using . notation, otherwise default to normal attribute behavior.<|endoftext|>
|
775253809effeec560d567712e208eb85d70c5fcb20aae228f5bf606aac2ec18
|
@property
def state_shape(self):
    """Shape of the internal state ndarray: (compartments, age groups, nodes)."""
    shape = (self.n_compartments, self.n_age_grps, self.n_nodes)
    return shape
|
Return the shape of the internal state ndarray.
|
bucky/model/state.py
|
state_shape
|
lshin-apl/bucky
| 0
|
python
|
@property
def state_shape(self):
return (self.n_compartments, self.n_age_grps, self.n_nodes)
|
@property
def state_shape(self):
return (self.n_compartments, self.n_age_grps, self.n_nodes)<|docstring|>Return the shape of the internal state ndarray.<|endoftext|>
|
007f563545fc793994628d33e04b247a00ada46939b3332c8d873ba55072e151
|
def init_S(self):
    """Init the S compartment such that N=1."""
    # S absorbs whatever mass the other compartments don't hold.
    occupied = xp.sum(self.state, axis=0)
    self.S = 1.0 - occupied
|
Init the S compartment such that N=1.
|
bucky/model/state.py
|
init_S
|
lshin-apl/bucky
| 0
|
python
|
def init_S(self):
self.S = (1.0 - xp.sum(self.state, axis=0))
|
def init_S(self):
self.S = (1.0 - xp.sum(self.state, axis=0))<|docstring|>Init the S compartment such that N=1.<|endoftext|>
|
4169199182b1083f1f46f639ba323e082b90a97fbd9aeae014895ea197f591af
|
def dump_nwb(nwb_path):
    """Print out nwb contents.

    Args:
        nwb_path (str): path to the nwb file

    Returns:
        None
    """
    with pynwb.NWBHDF5IO(nwb_path, 'r') as io:
        nwbfile = io.read()
        # Walk every data interface under the 'Face Rhythm' processing module.
        for interface in nwbfile.processing['Face Rhythm'].data_interfaces:
            print(interface)
            series_names = list(nwbfile.processing['Face Rhythm'][interface].time_series.keys())
            for time_series in series_names:
                data_tmp = nwbfile.processing['Face Rhythm'][interface][time_series].data
                print(f'    {time_series}:    {data_tmp.shape} , {data_tmp.dtype} , {round(((data_tmp.size * data_tmp.dtype.itemsize) / 1000000000), 6)} GB')
|
Print out nwb contents
Args:
nwb_path (str): path to the nwb file
Returns:
|
helpers.py
|
dump_nwb
|
RichieHakim/NBAP
| 0
|
python
|
def dump_nwb(nwb_path):
'\n Print out nwb contents\n\n Args:\n nwb_path (str): path to the nwb file\n\n Returns:\n '
with pynwb.NWBHDF5IO(nwb_path, 'r') as io:
nwbfile = io.read()
for interface in nwbfile.processing['Face Rhythm'].data_interfaces:
print(interface)
time_series_list = list(nwbfile.processing['Face Rhythm'][interface].time_series.keys())
for (ii, time_series) in enumerate(time_series_list):
data_tmp = nwbfile.processing['Face Rhythm'][interface][time_series].data
print(f' {time_series}: {data_tmp.shape} , {data_tmp.dtype} , {round(((data_tmp.size * data_tmp.dtype.itemsize) / 1000000000), 6)} GB')
|
def dump_nwb(nwb_path):
'\n Print out nwb contents\n\n Args:\n nwb_path (str): path to the nwb file\n\n Returns:\n '
with pynwb.NWBHDF5IO(nwb_path, 'r') as io:
nwbfile = io.read()
for interface in nwbfile.processing['Face Rhythm'].data_interfaces:
print(interface)
time_series_list = list(nwbfile.processing['Face Rhythm'][interface].time_series.keys())
for (ii, time_series) in enumerate(time_series_list):
data_tmp = nwbfile.processing['Face Rhythm'][interface][time_series].data
print(f' {time_series}: {data_tmp.shape} , {data_tmp.dtype} , {round(((data_tmp.size * data_tmp.dtype.itemsize) / 1000000000), 6)} GB')<|docstring|>Print out nwb contents
Args:
nwb_path (str): path to the nwb file
Returns:<|endoftext|>
|
c335d3d9d7bd3a11c4254e0f6dac4669c27e9b745af2926cf7ce6061bf3cb1ac
|
def __init__(self, *packages, build_tree=True, build_dependencies=True, enforce_init=True):
    """Initialization method.

    Args:
        *packages (args): list of packages to search for.
        build_tree (bool): auto-build the tree or not.
        build_dependencies (bool): auto-build the dependencies or not.
        enforce_init (bool):
            if True, only treat directories if they contain an
            ``__init__.py`` file.
    """
    self.finder = Finder()
    self.specs = []
    self.not_found = []
    self.enforce_init = enforce_init
    found = []
    for package in packages:
        spec = self.finder.find(package, enforce_init=enforce_init)
        if spec:
            found.append(spec)
        else:
            self.not_found.append(package)
    if not found:
        print('** dependenpy: DSM empty.', file=sys.stderr)
    self.specs = PackageSpec.combine(found)
    for missing in self.not_found:
        print(('** dependenpy: Not found: %s.' % missing), file=sys.stderr)
    super().__init__(build_tree)
    if build_tree and build_dependencies:
        self.build_dependencies()
|
Initialization method.
Args:
*packages (args): list of packages to search for.
build_tree (bool): auto-build the tree or not.
build_dependencies (bool): auto-build the dependencies or not.
enforce_init (bool):
if True, only treat directories if they contain an
``__init__.py`` file.
|
src/dependenpy/dsm.py
|
__init__
|
pawamoy/dependenpy
| 10
|
python
|
def __init__(self, *packages, build_tree=True, build_dependencies=True, enforce_init=True):
'\n Initialization method.\n\n Args:\n *packages (args): list of packages to search for.\n build_tree (bool): auto-build the tree or not.\n build_dependencies (bool): auto-build the dependencies or not.\n enforce_init (bool):\n if True, only treat directories if they contain an\n ``__init__.py`` file.\n '
self.finder = Finder()
self.specs = []
self.not_found = []
self.enforce_init = enforce_init
specs = []
for package in packages:
spec = self.finder.find(package, enforce_init=enforce_init)
if spec:
specs.append(spec)
else:
self.not_found.append(package)
if (not specs):
print('** dependenpy: DSM empty.', file=sys.stderr)
self.specs = PackageSpec.combine(specs)
for m in self.not_found:
print(('** dependenpy: Not found: %s.' % m), file=sys.stderr)
super().__init__(build_tree)
if (build_tree and build_dependencies):
self.build_dependencies()
|
def __init__(self, *packages, build_tree=True, build_dependencies=True, enforce_init=True):
'\n Initialization method.\n\n Args:\n *packages (args): list of packages to search for.\n build_tree (bool): auto-build the tree or not.\n build_dependencies (bool): auto-build the dependencies or not.\n enforce_init (bool):\n if True, only treat directories if they contain an\n ``__init__.py`` file.\n '
self.finder = Finder()
self.specs = []
self.not_found = []
self.enforce_init = enforce_init
specs = []
for package in packages:
spec = self.finder.find(package, enforce_init=enforce_init)
if spec:
specs.append(spec)
else:
self.not_found.append(package)
if (not specs):
print('** dependenpy: DSM empty.', file=sys.stderr)
self.specs = PackageSpec.combine(specs)
for m in self.not_found:
print(('** dependenpy: Not found: %s.' % m), file=sys.stderr)
super().__init__(build_tree)
if (build_tree and build_dependencies):
self.build_dependencies()<|docstring|>Initialization method.
Args:
*packages (args): list of packages to search for.
build_tree (bool): auto-build the tree or not.
build_dependencies (bool): auto-build the dependencies or not.
enforce_init (bool):
if True, only treat directories if they contain an
``__init__.py`` file.<|endoftext|>
|
ef9cba59d13be170c27cc018a920248b7de129b038b921165def8599083bbb9d
|
@property
def isdsm(self):
    """Inherited from NodeMixin. Always True for a DSM node."""
    return True
|
Inherited from NodeMixin. Always True.
|
src/dependenpy/dsm.py
|
isdsm
|
pawamoy/dependenpy
| 10
|
python
|
@property
def isdsm(self):
return True
|
@property
def isdsm(self):
return True<|docstring|>Inherited from NodeMixin. Always True.<|endoftext|>
|
1db1b05baebddc8aefdd77b7cfed657cf58978647d93ebf6e8b9b3a110d4f2ee
|
def build_tree(self):
    """Build the Python packages tree."""
    for spec in self.specs:
        # Plain modules and packages live in separate collections.
        if spec.ismodule:
            self.modules.append(Module(spec.name, spec.path, dsm=self))
        else:
            pkg = Package(
                spec.name,
                spec.path,
                dsm=self,
                limit_to=spec.limit_to,
                build_tree=True,
                build_dependencies=False,
                enforce_init=self.enforce_init,
            )
            self.packages.append(pkg)
|
Build the Python packages tree.
|
src/dependenpy/dsm.py
|
build_tree
|
pawamoy/dependenpy
| 10
|
python
|
def build_tree(self):
for spec in self.specs:
if spec.ismodule:
self.modules.append(Module(spec.name, spec.path, dsm=self))
else:
self.packages.append(Package(spec.name, spec.path, dsm=self, limit_to=spec.limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
|
def build_tree(self):
for spec in self.specs:
if spec.ismodule:
self.modules.append(Module(spec.name, spec.path, dsm=self))
else:
self.packages.append(Package(spec.name, spec.path, dsm=self, limit_to=spec.limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))<|docstring|>Build the Python packages tree.<|endoftext|>
|
9328962580c655beb11b6b7fcc1af93ced6ba169560b9d2d165ab7b20cfb5f98
|
def __init__(self, name, path, dsm=None, package=None, limit_to=None, build_tree=True, build_dependencies=True, enforce_init=True):
    """Initialization method.

    Args:
        name (str): name of the package.
        path (str): path to the package.
        dsm (DSM): parent DSM.
        package (Package): parent package.
        limit_to (list of str):
            list of string to limit the recursive tree-building to
            what is specified.
        build_tree (bool): auto-build the tree or not.
        build_dependencies (bool): auto-build the dependencies or not.
        enforce_init (bool):
            if True, only treat directories if they contain an
            ``__init__.py`` file.
    """
    self.name = name
    self.path = path
    self.package = package
    self.dsm = dsm
    self.limit_to = limit_to or []
    self.enforce_init = enforce_init
    # Both bases contribute state; RootNode may kick off tree building.
    RootNode.__init__(self, build_tree)
    LeafNode.__init__(self)
    if build_tree and build_dependencies:
        self.build_dependencies()
|
Initialization method.
Args:
name (str): name of the package.
path (str): path to the package.
dsm (DSM): parent DSM.
package (Package): parent package.
limit_to (list of str):
list of string to limit the recursive tree-building to
what is specified.
build_tree (bool): auto-build the tree or not.
build_dependencies (bool): auto-build the dependencies or not.
enforce_init (bool):
if True, only treat directories if they contain an
``__init__.py`` file.
|
src/dependenpy/dsm.py
|
__init__
|
pawamoy/dependenpy
| 10
|
python
|
def __init__(self, name, path, dsm=None, package=None, limit_to=None, build_tree=True, build_dependencies=True, enforce_init=True):
'\n Initialization method.\n\n Args:\n name (str): name of the package.\n path (str): path to the package.\n dsm (DSM): parent DSM.\n package (Package): parent package.\n limit_to (list of str):\n list of string to limit the recursive tree-building to\n what is specified.\n build_tree (bool): auto-build the tree or not.\n build_dependencies (bool): auto-build the dependencies or not.\n enforce_init (bool):\n if True, only treat directories if they contain an\n ``__init__.py`` file.\n '
self.name = name
self.path = path
self.package = package
self.dsm = dsm
self.limit_to = (limit_to or [])
self.enforce_init = enforce_init
RootNode.__init__(self, build_tree)
LeafNode.__init__(self)
if (build_tree and build_dependencies):
self.build_dependencies()
|
def __init__(self, name, path, dsm=None, package=None, limit_to=None, build_tree=True, build_dependencies=True, enforce_init=True):
'\n Initialization method.\n\n Args:\n name (str): name of the package.\n path (str): path to the package.\n dsm (DSM): parent DSM.\n package (Package): parent package.\n limit_to (list of str):\n list of string to limit the recursive tree-building to\n what is specified.\n build_tree (bool): auto-build the tree or not.\n build_dependencies (bool): auto-build the dependencies or not.\n enforce_init (bool):\n if True, only treat directories if they contain an\n ``__init__.py`` file.\n '
self.name = name
self.path = path
self.package = package
self.dsm = dsm
self.limit_to = (limit_to or [])
self.enforce_init = enforce_init
RootNode.__init__(self, build_tree)
LeafNode.__init__(self)
if (build_tree and build_dependencies):
self.build_dependencies()<|docstring|>Initialization method.
Args:
name (str): name of the package.
path (str): path to the package.
dsm (DSM): parent DSM.
package (Package): parent package.
limit_to (list of str):
list of string to limit the recursive tree-building to
what is specified.
build_tree (bool): auto-build the tree or not.
build_dependencies (bool): auto-build the dependencies or not.
enforce_init (bool):
if True, only treat directories if they contain an
``__init__.py`` file.<|endoftext|>
|
8b8f2804f33ccc7df7bcd921432f8690f9cf226e41ec22dc798fd3c35ceb1f0d
|
@property
def ispackage(self):
    """Inherited from NodeMixin. Always True for a Package node."""
    return True
|
Inherited from NodeMixin. Always True.
|
src/dependenpy/dsm.py
|
ispackage
|
pawamoy/dependenpy
| 10
|
python
|
@property
def ispackage(self):
return True
|
@property
def ispackage(self):
return True<|docstring|>Inherited from NodeMixin. Always True.<|endoftext|>
|
77097be9e72aab672ce7ea1138edb9920ff1fcff45b4cc47cc0c384492ab9f80
|
@property
def issubpackage(self):
    """Whether this package is nested inside another package.

    Returns:
        bool: this package has a parent.
    """
    return self.package is not None
|
Property to tell if this node is a sub-package.
Returns:
bool: this package has a parent.
|
src/dependenpy/dsm.py
|
issubpackage
|
pawamoy/dependenpy
| 10
|
python
|
@property
def issubpackage(self):
'\n Property to tell if this node is a sub-package.\n\n Returns:\n bool: this package has a parent.\n '
return (self.package is not None)
|
@property
def issubpackage(self):
'\n Property to tell if this node is a sub-package.\n\n Returns:\n bool: this package has a parent.\n '
return (self.package is not None)<|docstring|>Property to tell if this node is a sub-package.
Returns:
bool: this package has a parent.<|endoftext|>
|
f656fe20a151d1715c587adf5f5518d50db0623efd6e1cf8345f8e785b47e5c5
|
@property
def isroot(self):
    """Whether this node is a root node.

    Returns:
        bool: True if this package has no parent.
    """
    return self.package is None
|
Property to tell if this node is a root node.
Returns:
bool: this package has no parent.
|
src/dependenpy/dsm.py
|
isroot
|
pawamoy/dependenpy
| 10
|
python
|
@property
def isroot(self):
'\n Property to tell if this node is a root node.\n\n Returns:\n bool: this package has no parent.\n '
return (self.package is None)
|
@property
def isroot(self):
'\n Property to tell if this node is a root node.\n\n Returns:\n bool: this package has no parent.\n '
return (self.package is None)<|docstring|>Property to tell if this node is a root node.
Returns:
bool: this package has no parent.<|endoftext|>
|
c00143cab22302342ff069f9100666ea293a62543ebb7121cdd7b0a2ae807b05
|
def split_limits_heads(self):
    """Split the dotted limit strings into first components and remainders.

    Returns:
        (list of str, list of str): the first parts ("heads") of every
        limit string, and the remainders of the dotted ones.
    """
    heads = []
    remainders = []
    for limit in self.limit_to:
        # partition splits on the first dot only, mirroring split('.', 1)
        head, dot, rest = limit.partition('.')
        heads.append(head)
        if dot:
            remainders.append(rest)
    return heads, remainders
|
Return first parts of dot-separated strings, and rest of strings.
Returns:
(list of str, list of str): the heads and rest of the strings.
|
src/dependenpy/dsm.py
|
split_limits_heads
|
pawamoy/dependenpy
| 10
|
python
|
def split_limits_heads(self):
'\n Return first parts of dot-separated strings, and rest of strings.\n\n Returns:\n (list of str, list of str): the heads and rest of the strings.\n '
heads = []
new_limit_to = []
for limit in self.limit_to:
if ('.' in limit):
(name, limit) = limit.split('.', 1)
heads.append(name)
new_limit_to.append(limit)
else:
heads.append(limit)
return (heads, new_limit_to)
|
def split_limits_heads(self):
'\n Return first parts of dot-separated strings, and rest of strings.\n\n Returns:\n (list of str, list of str): the heads and rest of the strings.\n '
heads = []
new_limit_to = []
for limit in self.limit_to:
if ('.' in limit):
(name, limit) = limit.split('.', 1)
heads.append(name)
new_limit_to.append(limit)
else:
heads.append(limit)
return (heads, new_limit_to)<|docstring|>Return first parts of dot-separated strings, and rest of strings.
Returns:
(list of str, list of str): the heads and rest of the strings.<|endoftext|>
|
adab9232cde00793214c6b8111646adb1b7c2967af57e6e56c55503e748b4faa
|
def build_tree(self):
    """Build the tree for this package."""
    for entry in listdir(self.path):
        abs_entry = join(self.path, entry)
        if isfile(abs_entry) and entry.endswith('.py'):
            module_name = splitext(entry)[0]
            # honor the limit list when one was given
            if not self.limit_to or module_name in self.limit_to:
                self.modules.append(Module(module_name, abs_entry, self.dsm, self))
        elif isdir(abs_entry):
            has_init = isfile(join(abs_entry, '__init__.py'))
            if has_init or not self.enforce_init:
                heads, new_limit_to = self.split_limits_heads()
                if not heads or entry in heads:
                    self.packages.append(Package(
                        entry, abs_entry, self.dsm, self, new_limit_to,
                        build_tree=True, build_dependencies=False,
                        enforce_init=self.enforce_init))
|
Build the tree for this package.
|
src/dependenpy/dsm.py
|
build_tree
|
pawamoy/dependenpy
| 10
|
python
|
def build_tree(self):
for m in listdir(self.path):
abs_m = join(self.path, m)
if (isfile(abs_m) and m.endswith('.py')):
name = splitext(m)[0]
if ((not self.limit_to) or (name in self.limit_to)):
self.modules.append(Module(name, abs_m, self.dsm, self))
elif isdir(abs_m):
if (isfile(join(abs_m, '__init__.py')) or (not self.enforce_init)):
(heads, new_limit_to) = self.split_limits_heads()
if ((not heads) or (m in heads)):
self.packages.append(Package(m, abs_m, self.dsm, self, new_limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
|
def build_tree(self):
for m in listdir(self.path):
abs_m = join(self.path, m)
if (isfile(abs_m) and m.endswith('.py')):
name = splitext(m)[0]
if ((not self.limit_to) or (name in self.limit_to)):
self.modules.append(Module(name, abs_m, self.dsm, self))
elif isdir(abs_m):
if (isfile(join(abs_m, '__init__.py')) or (not self.enforce_init)):
(heads, new_limit_to) = self.split_limits_heads()
if ((not heads) or (m in heads)):
self.packages.append(Package(m, abs_m, self.dsm, self, new_limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))<|docstring|>Build the tree for this package.<|endoftext|>
|
09309662f472fdf84e36227f2346b147baf8921cd943b0bd59183e9e6f1e09f7
|
def cardinal(self, to):
    """Count the dependencies of this package towards the given node.

    Args:
        to (Package/Module): target node.

    Returns:
        int: number of dependencies.
    """
    total = 0
    for module in self.submodules:
        total += module.cardinal(to)
    return total
|
Return the number of dependencies of this package to the given node.
Args:
to (Package/Module): target node.
Returns:
int: number of dependencies.
|
src/dependenpy/dsm.py
|
cardinal
|
pawamoy/dependenpy
| 10
|
python
|
def cardinal(self, to):
'\n Return the number of dependencies of this package to the given node.\n\n Args:\n to (Package/Module): target node.\n\n Returns:\n int: number of dependencies.\n '
return sum((m.cardinal(to) for m in self.submodules))
|
def cardinal(self, to):
'\n Return the number of dependencies of this package to the given node.\n\n Args:\n to (Package/Module): target node.\n\n Returns:\n int: number of dependencies.\n '
return sum((m.cardinal(to) for m in self.submodules))<|docstring|>Return the number of dependencies of this package to the given node.
Args:
to (Package/Module): target node.
Returns:
int: number of dependencies.<|endoftext|>
|
f161a79acf71c7f9fb24bf3dcdcb80874f65d6d461fcd64df1c9960173eb0f57
|
def __init__(self, name, path, dsm=None, package=None):
    """Initialization method.

    Args:
        name (str): name of the module.
        path (str): path to the module.
        dsm (DSM): parent DSM.
        package (Package): parent Package.
    """
    super().__init__()
    self.name = name
    self.path = path
    self.dsm = dsm
    self.package = package
    # dependencies are filled in later by build_dependencies()
    self.dependencies = []
|
Initialization method.
Args:
name (str): name of the module.
path (str): path to the module.
dsm (DSM): parent DSM.
package (Package): parent Package.
|
src/dependenpy/dsm.py
|
__init__
|
pawamoy/dependenpy
| 10
|
python
|
def __init__(self, name, path, dsm=None, package=None):
'\n Initialization method.\n\n Args:\n name (str): name of the module.\n path (str): path to the module.\n dsm (DSM): parent DSM.\n package (Package): parent Package.\n '
super().__init__()
self.name = name
self.path = path
self.package = package
self.dsm = dsm
self.dependencies = []
|
def __init__(self, name, path, dsm=None, package=None):
'\n Initialization method.\n\n Args:\n name (str): name of the module.\n path (str): path to the module.\n dsm (DSM): parent DSM.\n package (Package): parent Package.\n '
super().__init__()
self.name = name
self.path = path
self.package = package
self.dsm = dsm
self.dependencies = []<|docstring|>Initialization method.
Args:
name (str): name of the module.
path (str): path to the module.
dsm (DSM): parent DSM.
package (Package): parent Package.<|endoftext|>
|
3c9bb393b72934001acc7aaa4aea1e47bc9c3e25c7c6dd4d6a0354ccbf17dfa6
|
def __contains__(self, item):
"\n Whether given item is contained inside this module.\n\n Args:\n item (Package/Module): a package or module.\n\n Returns:\n bool:\n True if self is item or item is self's package and\n self if an ``__init__`` module.\n "
if (self is item):
return True
elif ((self.package is item) and (self.name == '__init__')):
return True
return False
|
Whether given item is contained inside this module.
Args:
item (Package/Module): a package or module.
Returns:
bool:
True if self is item or item is self's package and
self if an ``__init__`` module.
|
src/dependenpy/dsm.py
|
__contains__
|
pawamoy/dependenpy
| 10
|
python
|
def __contains__(self, item):
"\n Whether given item is contained inside this module.\n\n Args:\n item (Package/Module): a package or module.\n\n Returns:\n bool:\n True if self is item or item is self's package and\n self if an ``__init__`` module.\n "
if (self is item):
return True
elif ((self.package is item) and (self.name == '__init__')):
return True
return False
|
def __contains__(self, item):
"\n Whether given item is contained inside this module.\n\n Args:\n item (Package/Module): a package or module.\n\n Returns:\n bool:\n True if self is item or item is self's package and\n self if an ``__init__`` module.\n "
if (self is item):
return True
elif ((self.package is item) and (self.name == '__init__')):
return True
return False<|docstring|>Whether given item is contained inside this module.
Args:
item (Package/Module): a package or module.
Returns:
bool:
True if self is item or item is self's package and
self if an ``__init__`` module.<|endoftext|>
|
397300c23f30d370328c171ba0973c7b6d94c3b62ecd4a377a99837afcc3a83b
|
@property
def ismodule(self):
    """Inherited from NodeMixin. Always True."""
    return True
|
Inherited from NodeMixin. Always True.
|
src/dependenpy/dsm.py
|
ismodule
|
pawamoy/dependenpy
| 10
|
python
|
@property
def ismodule(self):
return True
|
@property
def ismodule(self):
return True<|docstring|>Inherited from NodeMixin. Always True.<|endoftext|>
|
ded7f7ae673983d609cf1f4906a2a708dbfa2e8440b50c3975a1f690a36fee08
|
def as_dict(self, absolute=False):
    """Return the dependencies as a dictionary.

    Args:
        absolute (bool): use this module's absolute (dotted) name
            instead of its short name.

    Returns:
        dict: dictionary with the module name, path and dependencies.
    """
    name = self.absolute_name() if absolute else self.name
    dependencies = []
    for dep in self.dependencies:
        # external targets are plain strings; internal ones are nodes
        target = dep.target if dep.external else dep.target.absolute_name()
        dependencies.append({
            'target': target,
            'lineno': dep.lineno,
            'what': dep.what,
            'external': dep.external,
        })
    return {'name': name, 'path': self.path, 'dependencies': dependencies}
|
Return the dependencies as a dictionary.
Returns:
dict: dictionary of dependencies.
|
src/dependenpy/dsm.py
|
as_dict
|
pawamoy/dependenpy
| 10
|
python
|
def as_dict(self, absolute=False):
'\n Return the dependencies as a dictionary.\n\n Returns:\n dict: dictionary of dependencies.\n '
return {'name': (self.absolute_name() if absolute else self.name), 'path': self.path, 'dependencies': [{'target': (d.target if d.external else d.target.absolute_name()), 'lineno': d.lineno, 'what': d.what, 'external': d.external} for d in self.dependencies]}
|
def as_dict(self, absolute=False):
'\n Return the dependencies as a dictionary.\n\n Returns:\n dict: dictionary of dependencies.\n '
return {'name': (self.absolute_name() if absolute else self.name), 'path': self.path, 'dependencies': [{'target': (d.target if d.external else d.target.absolute_name()), 'lineno': d.lineno, 'what': d.what, 'external': d.external} for d in self.dependencies]}<|docstring|>Return the dependencies as a dictionary.
Returns:
dict: dictionary of dependencies.<|endoftext|>
|
f01711e69864a1445150ec7323c9a35c28acfc86b9bebb6c5dd6c3fb5067434d
|
def build_dependencies(self):
    """Build the dependencies for this module.

    Parse the code with ast, find all the import statements and convert
    them into Dependency objects.
    """
    # resolve targets against the highest known node (DSM or tree root)
    highest = self.dsm or self.root
    if self is highest:
        highest = LeafNode()
    for import_info in self.parse_code():
        target = highest.get_target(import_info['target'])
        if target:
            imported_name = import_info['target'].split('.')[-1]
            # record the imported member only when it differs from the
            # resolved node's own name
            if imported_name != target.name:
                import_info['what'] = imported_name
            import_info['target'] = target
        self.dependencies.append(Dependency(source=self, **import_info))
|
Build the dependencies for this module.
Parse the code with ast, find all the import statements, convert
them into Dependency objects.
|
src/dependenpy/dsm.py
|
build_dependencies
|
pawamoy/dependenpy
| 10
|
python
|
def build_dependencies(self):
'\n Build the dependencies for this module.\n\n Parse the code with ast, find all the import statements, convert\n them into Dependency objects.\n '
highest = (self.dsm or self.root)
if (self is highest):
highest = LeafNode()
for _import in self.parse_code():
target = highest.get_target(_import['target'])
if target:
what = _import['target'].split('.')[(- 1)]
if (what != target.name):
_import['what'] = what
_import['target'] = target
self.dependencies.append(Dependency(source=self, **_import))
|
def build_dependencies(self):
'\n Build the dependencies for this module.\n\n Parse the code with ast, find all the import statements, convert\n them into Dependency objects.\n '
highest = (self.dsm or self.root)
if (self is highest):
highest = LeafNode()
for _import in self.parse_code():
target = highest.get_target(_import['target'])
if target:
what = _import['target'].split('.')[(- 1)]
if (what != target.name):
_import['what'] = what
_import['target'] = target
self.dependencies.append(Dependency(source=self, **_import))<|docstring|>Build the dependencies for this module.
Parse the code with ast, find all the import statements, convert
them into Dependency objects.<|endoftext|>
|
ce3e649a9ea2e2fb2848c8a2ec5314651fbd9e2c57eab90eb0469682aee3bb5f
|
def parse_code(self):
    """Read the source code and return all the import statements.

    Returns:
        list of dict: the import statements (empty on a syntax error).
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on garbage collection to close it.
    with open(self.path, encoding='utf-8') as stream:
        code = stream.read()
    try:
        body = ast.parse(code).body
    except SyntaxError:
        try:
            # Retry on the raw bytes: ast then honors an explicit source
            # encoding declaration (e.g. "# -*- coding: ... -*-").
            code = code.encode('utf-8')
            body = ast.parse(code).body
        except SyntaxError:
            return []
    return self.get_imports(body)
|
Read the source code and return all the import statements.
Returns:
list of dict: the import statements.
|
src/dependenpy/dsm.py
|
parse_code
|
pawamoy/dependenpy
| 10
|
python
|
def parse_code(self):
'\n Read the source code and return all the import statements.\n\n Returns:\n list of dict: the import statements.\n '
code = open(self.path, encoding='utf-8').read()
try:
body = ast.parse(code).body
except SyntaxError:
try:
code = code.encode('utf-8')
body = ast.parse(code).body
except SyntaxError:
return []
return self.get_imports(body)
|
def parse_code(self):
'\n Read the source code and return all the import statements.\n\n Returns:\n list of dict: the import statements.\n '
code = open(self.path, encoding='utf-8').read()
try:
body = ast.parse(code).body
except SyntaxError:
try:
code = code.encode('utf-8')
body = ast.parse(code).body
except SyntaxError:
return []
return self.get_imports(body)<|docstring|>Read the source code and return all the import statements.
Returns:
list of dict: the import statements.<|endoftext|>
|
cf999e4e6136281fe60ac4568068ddfe34e7b1a7a1d0356825fdebf83f2e25bd
|
def get_imports(self, ast_body):
    """Return all the import statements given an AST body (AST nodes).

    Args:
        ast_body (compiled code's body): the body to filter.

    Returns:
        list of dict: the import statements, as {'target', 'lineno'} dicts.
    """
    imports = []
    for node in ast_body:
        if isinstance(node, ast.Import):
            for alias in node.names:
                imports.append({'target': alias.name, 'lineno': node.lineno})
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                prefix = ''
                if node.level > 0:
                    # relative import: anchor it to this module's package
                    prefix += self.absolute_name(self.depth - node.level) + '.'
                if node.module:
                    prefix += node.module + '.'
                imports.append({'target': prefix + alias.name,
                                'lineno': node.lineno})
        elif isinstance(node, Module.RECURSIVE_NODES):
            # descend into compound statements (if/for/try/...)
            imports.extend(self.get_imports(node.body))
            if isinstance(node, ast.Try):
                imports.extend(self.get_imports(node.finalbody))
    return imports
|
Return all the import statements given an AST body (AST nodes).
Args:
ast_body (compiled code's body): the body to filter.
Returns:
list of dict: the import statements.
|
src/dependenpy/dsm.py
|
get_imports
|
pawamoy/dependenpy
| 10
|
python
|
def get_imports(self, ast_body):
"\n Return all the import statements given an AST body (AST nodes).\n\n Args:\n ast_body (compiled code's body): the body to filter.\n\n Returns:\n list of dict: the import statements.\n "
imports = []
for node in ast_body:
if isinstance(node, ast.Import):
imports.extend(({'target': name.name, 'lineno': node.lineno} for name in node.names))
elif isinstance(node, ast.ImportFrom):
for name in node.names:
name = ((((self.absolute_name((self.depth - node.level)) + '.') if (node.level > 0) else '') + ((node.module + '.') if node.module else '')) + name.name)
imports.append({'target': name, 'lineno': node.lineno})
elif isinstance(node, Module.RECURSIVE_NODES):
imports.extend(self.get_imports(node.body))
if isinstance(node, ast.Try):
imports.extend(self.get_imports(node.finalbody))
return imports
|
def get_imports(self, ast_body):
"\n Return all the import statements given an AST body (AST nodes).\n\n Args:\n ast_body (compiled code's body): the body to filter.\n\n Returns:\n list of dict: the import statements.\n "
imports = []
for node in ast_body:
if isinstance(node, ast.Import):
imports.extend(({'target': name.name, 'lineno': node.lineno} for name in node.names))
elif isinstance(node, ast.ImportFrom):
for name in node.names:
name = ((((self.absolute_name((self.depth - node.level)) + '.') if (node.level > 0) else '') + ((node.module + '.') if node.module else '')) + name.name)
imports.append({'target': name, 'lineno': node.lineno})
elif isinstance(node, Module.RECURSIVE_NODES):
imports.extend(self.get_imports(node.body))
if isinstance(node, ast.Try):
imports.extend(self.get_imports(node.finalbody))
return imports<|docstring|>Return all the import statements given an AST body (AST nodes).
Args:
ast_body (compiled code's body): the body to filter.
Returns:
list of dict: the import statements.<|endoftext|>
|
0380f177480a8ba3bcec8ea86fdd8603924762743b1f0911b20595130baa2640
|
def cardinal(self, to):
    """Count this module's dependencies towards the given node.

    Args:
        to (Package/Module): the target node.

    Returns:
        int: number of internal dependencies whose target is in *to*.
    """
    count = 0
    for dependency in self.dependencies:
        # external (string) targets never count
        if not dependency.external and dependency.target in to:
            count += 1
    return count
|
Return the number of dependencies of this module to the given node.
Args:
to (Package/Module): the target node.
Returns:
int: number of dependencies.
|
src/dependenpy/dsm.py
|
cardinal
|
pawamoy/dependenpy
| 10
|
python
|
def cardinal(self, to):
'\n Return the number of dependencies of this module to the given node.\n\n Args:\n to (Package/Module): the target node.\n\n Returns:\n int: number of dependencies.\n '
return sum((1 for _ in filter((lambda d: ((not d.external) and (d.target in to))), self.dependencies)))
|
def cardinal(self, to):
'\n Return the number of dependencies of this module to the given node.\n\n Args:\n to (Package/Module): the target node.\n\n Returns:\n int: number of dependencies.\n '
return sum((1 for _ in filter((lambda d: ((not d.external) and (d.target in to))), self.dependencies)))<|docstring|>Return the number of dependencies of this module to the given node.
Args:
to (Package/Module): the target node.
Returns:
int: number of dependencies.<|endoftext|>
|
5d9ff2aa9b17d65f82d5c865b7241af28bd3474c642e9be7d4f283055c5ca625
|
def __init__(self, source, lineno, target, what=None):
'\n Initialization method.\n\n Args:\n source (Module): source Module.\n lineno (int): number of line at which import statement occurs.\n target (str/Module/Package): the target node.\n what (str): what is imported (optional).\n '
self.source = source
self.lineno = lineno
self.target = target
self.what = what
|
Initialization method.
Args:
source (Module): source Module.
lineno (int): number of line at which import statement occurs.
target (str/Module/Package): the target node.
what (str): what is imported (optional).
|
src/dependenpy/dsm.py
|
__init__
|
pawamoy/dependenpy
| 10
|
python
|
def __init__(self, source, lineno, target, what=None):
'\n Initialization method.\n\n Args:\n source (Module): source Module.\n lineno (int): number of line at which import statement occurs.\n target (str/Module/Package): the target node.\n what (str): what is imported (optional).\n '
self.source = source
self.lineno = lineno
self.target = target
self.what = what
|
def __init__(self, source, lineno, target, what=None):
'\n Initialization method.\n\n Args:\n source (Module): source Module.\n lineno (int): number of line at which import statement occurs.\n target (str/Module/Package): the target node.\n what (str): what is imported (optional).\n '
self.source = source
self.lineno = lineno
self.target = target
self.what = what<|docstring|>Initialization method.
Args:
source (Module): source Module.
lineno (int): number of line at which import statement occurs.
target (str/Module/Package): the target node.
what (str): what is imported (optional).<|endoftext|>
|
5ad06d0757210fe64c4bef06638e189fd66d8433c8b7290b91245454beae5253
|
@property
def external(self):
    """Whether the dependency's target is external (unresolved).

    The original docstring claimed the opposite ("target is a valid
    node"): this returns True exactly when the target is still a plain
    string, i.e. an import that was NOT resolved to a module/package
    node inside the DSM.
    """
    return isinstance(self.target, str)
|
Property to tell if the dependency's target is a valid node.
|
src/dependenpy/dsm.py
|
external
|
pawamoy/dependenpy
| 10
|
python
|
@property
def external(self):
return isinstance(self.target, str)
|
@property
def external(self):
return isinstance(self.target, str)<|docstring|>Property to tell if the dependency's target is a valid node.<|endoftext|>
|
6e43a2ab821dceb8f4b26f7843410ed5c8cdb2eb404552ea1591ff73ef31f485
|
def get_setup_args():
    """Get arguments needed in setup.py."""
    parser = argparse.ArgumentParser('Download and pre-process SQuAD')
    add_common_args(parser)
    # Data locations: plain string arguments, registered in the original order.
    string_args = [
        ('--train_url', 'https://github.com/chrischute/squad/data/train-v2.0.json'),
        ('--dev_url', 'https://github.com/chrischute/squad/data/dev-v2.0.json'),
        ('--test_url', 'https://github.com/chrischute/squad/data/test-v2.0.json'),
        ('--glove_url', 'http://nlp.stanford.edu/data/glove.840B.300d.zip'),
        ('--dev_meta_file', './data/dev_meta.json'),
        ('--test_meta_file', './data/test_meta.json'),
        ('--word2idx_file', './data/word2idx.json'),
        ('--char2idx_file', './data/char2idx.json'),
        ('--answer_file', './data/answer.json'),
    ]
    for flag, default in string_args:
        parser.add_argument(flag, type=str, default=default)
    # Pre-processing limits and embedding sizes.
    int_args = [
        ('--para_limit', 400, 'Max number of words in a paragraph'),
        ('--ques_limit', 50, 'Max number of words to keep from a question'),
        ('--test_para_limit', 1000, 'Max number of words in a paragraph at test time'),
        ('--test_ques_limit', 100, 'Max number of words in a question at test time'),
        ('--char_dim', 64, 'Size of char vectors (char-level embeddings)'),
        ('--glove_dim', 300, 'Size of GloVe word vectors to use'),
        ('--glove_num_vecs', 2196017, 'Number of GloVe vectors'),
        ('--ans_limit', 30, 'Max number of words in a training example answer'),
        ('--char_limit', 16, 'Max number of chars to keep from a word'),
    ]
    for flag, default, help_text in int_args:
        parser.add_argument(flag, type=int, default=default, help=help_text)
    parser.add_argument('--include_test_examples',
                        type=(lambda s: s.lower().startswith('t')),
                        default=True,
                        help='Process examples from the test set')
    return parser.parse_args()
|
Get arguments needed in setup.py.
|
args.py
|
get_setup_args
|
amelia22974/cs224n-2022-iid-squad
| 0
|
python
|
def get_setup_args():
parser = argparse.ArgumentParser('Download and pre-process SQuAD')
add_common_args(parser)
parser.add_argument('--train_url', type=str, default='https://github.com/chrischute/squad/data/train-v2.0.json')
parser.add_argument('--dev_url', type=str, default='https://github.com/chrischute/squad/data/dev-v2.0.json')
parser.add_argument('--test_url', type=str, default='https://github.com/chrischute/squad/data/test-v2.0.json')
parser.add_argument('--glove_url', type=str, default='http://nlp.stanford.edu/data/glove.840B.300d.zip')
parser.add_argument('--dev_meta_file', type=str, default='./data/dev_meta.json')
parser.add_argument('--test_meta_file', type=str, default='./data/test_meta.json')
parser.add_argument('--word2idx_file', type=str, default='./data/word2idx.json')
parser.add_argument('--char2idx_file', type=str, default='./data/char2idx.json')
parser.add_argument('--answer_file', type=str, default='./data/answer.json')
parser.add_argument('--para_limit', type=int, default=400, help='Max number of words in a paragraph')
parser.add_argument('--ques_limit', type=int, default=50, help='Max number of words to keep from a question')
parser.add_argument('--test_para_limit', type=int, default=1000, help='Max number of words in a paragraph at test time')
parser.add_argument('--test_ques_limit', type=int, default=100, help='Max number of words in a question at test time')
parser.add_argument('--char_dim', type=int, default=64, help='Size of char vectors (char-level embeddings)')
parser.add_argument('--glove_dim', type=int, default=300, help='Size of GloVe word vectors to use')
parser.add_argument('--glove_num_vecs', type=int, default=2196017, help='Number of GloVe vectors')
parser.add_argument('--ans_limit', type=int, default=30, help='Max number of words in a training example answer')
parser.add_argument('--char_limit', type=int, default=16, help='Max number of chars to keep from a word')
parser.add_argument('--include_test_examples', type=(lambda s: s.lower().startswith('t')), default=True, help='Process examples from the test set')
args = parser.parse_args()
return args
|
def get_setup_args():
parser = argparse.ArgumentParser('Download and pre-process SQuAD')
add_common_args(parser)
parser.add_argument('--train_url', type=str, default='https://github.com/chrischute/squad/data/train-v2.0.json')
parser.add_argument('--dev_url', type=str, default='https://github.com/chrischute/squad/data/dev-v2.0.json')
parser.add_argument('--test_url', type=str, default='https://github.com/chrischute/squad/data/test-v2.0.json')
parser.add_argument('--glove_url', type=str, default='http://nlp.stanford.edu/data/glove.840B.300d.zip')
parser.add_argument('--dev_meta_file', type=str, default='./data/dev_meta.json')
parser.add_argument('--test_meta_file', type=str, default='./data/test_meta.json')
parser.add_argument('--word2idx_file', type=str, default='./data/word2idx.json')
parser.add_argument('--char2idx_file', type=str, default='./data/char2idx.json')
parser.add_argument('--answer_file', type=str, default='./data/answer.json')
parser.add_argument('--para_limit', type=int, default=400, help='Max number of words in a paragraph')
parser.add_argument('--ques_limit', type=int, default=50, help='Max number of words to keep from a question')
parser.add_argument('--test_para_limit', type=int, default=1000, help='Max number of words in a paragraph at test time')
parser.add_argument('--test_ques_limit', type=int, default=100, help='Max number of words in a question at test time')
parser.add_argument('--char_dim', type=int, default=64, help='Size of char vectors (char-level embeddings)')
parser.add_argument('--glove_dim', type=int, default=300, help='Size of GloVe word vectors to use')
parser.add_argument('--glove_num_vecs', type=int, default=2196017, help='Number of GloVe vectors')
parser.add_argument('--ans_limit', type=int, default=30, help='Max number of words in a training example answer')
parser.add_argument('--char_limit', type=int, default=16, help='Max number of chars to keep from a word')
parser.add_argument('--include_test_examples', type=(lambda s: s.lower().startswith('t')), default=True, help='Process examples from the test set')
args = parser.parse_args()
return args<|docstring|>Get arguments needed in setup.py.<|endoftext|>
|
4a6b3f12e94b8912f23df5eef451f4ac31edb78f27c1bf5c4edfeadd4ebb53d6
|
def get_train_args():
    """Get arguments needed in train.py.

    Returns:
        argparse.Namespace: parsed training arguments, with
        ``maximize_metric`` derived from ``metric_name``.

    Raises:
        ValueError: if ``metric_name`` is not one of NLL, EM, F1.
    """
    parser = argparse.ArgumentParser('Train a model on SQuAD')
    add_common_args(parser)
    add_train_test_args(parser)
    parser.add_argument('--eval_steps', type=int, default=50000,
                        help='Number of steps between successive evaluations.')
    parser.add_argument('--lr', type=float, default=0.5,
                        help='Learning rate.')
    parser.add_argument('--l2_wd', type=float, default=0,
                        help='L2 weight decay.')
    parser.add_argument('--num_epochs', type=int, default=30,
                        help='Number of epochs for which to train. Negative means forever.')
    parser.add_argument('--drop_prob', type=float, default=0.2,
                        help='Probability of zeroing an activation in dropout layers.')
    parser.add_argument('--metric_name', type=str, default='F1',
                        choices=('NLL', 'EM', 'F1'),
                        help='Name of dev metric to determine best checkpoint.')
    parser.add_argument('--max_checkpoints', type=int, default=5,
                        help='Maximum number of checkpoints to keep on disk.')
    parser.add_argument('--max_grad_norm', type=float, default=5.0,
                        help='Maximum gradient norm for gradient clipping.')
    parser.add_argument('--seed', type=int, default=224,
                        help='Random seed for reproducibility.')
    parser.add_argument('--ema_decay', type=float, default=0.999,
                        help='Decay rate for exponential moving average of parameters.')
    # BUG FIX: the original used ``type=bool`` for the three flags below.
    # argparse applies bool() to the raw command-line string, and bool of any
    # non-empty string (including "False") is True, so e.g.
    # ``--span_corrupt False`` silently enabled the feature.  Use the same
    # truthy-string parser that get_setup_args uses for --include_test_examples.
    str2bool = lambda s: s.lower().startswith('t')
    parser.add_argument('--span_corrupt', type=str2bool, default=False,
                        help='Whether or not to span corrupt the training data. The corruption occurs to the context.')
    parser.add_argument('--back_translation', type=str2bool, default=False,
                        help='Whether or not to use backtranslation on the training data.')
    parser.add_argument('--use_char_emb', type=str2bool, default=False,
                        help='Use character embedding')
    args = parser.parse_args()
    if args.metric_name == 'NLL':
        # Lower negative log-likelihood is better.
        args.maximize_metric = False
    elif args.metric_name in ('EM', 'F1'):
        args.maximize_metric = True
    else:
        raise ValueError(f'Unrecognized metric name: "{args.metric_name}"')
    return args
|
Get arguments needed in train.py.
|
args.py
|
get_train_args
|
amelia22974/cs224n-2022-iid-squad
| 0
|
python
|
def get_train_args():
parser = argparse.ArgumentParser('Train a model on SQuAD')
add_common_args(parser)
add_train_test_args(parser)
parser.add_argument('--eval_steps', type=int, default=50000, help='Number of steps between successive evaluations.')
parser.add_argument('--lr', type=float, default=0.5, help='Learning rate.')
parser.add_argument('--l2_wd', type=float, default=0, help='L2 weight decay.')
parser.add_argument('--num_epochs', type=int, default=30, help='Number of epochs for which to train. Negative means forever.')
parser.add_argument('--drop_prob', type=float, default=0.2, help='Probability of zeroing an activation in dropout layers.')
parser.add_argument('--metric_name', type=str, default='F1', choices=('NLL', 'EM', 'F1'), help='Name of dev metric to determine best checkpoint.')
parser.add_argument('--max_checkpoints', type=int, default=5, help='Maximum number of checkpoints to keep on disk.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Maximum gradient norm for gradient clipping.')
parser.add_argument('--seed', type=int, default=224, help='Random seed for reproducibility.')
parser.add_argument('--ema_decay', type=float, default=0.999, help='Decay rate for exponential moving average of parameters.')
parser.add_argument('--span_corrupt', type=bool, default=False, help='Whether or not to span corrupt the training data. The corruption occurs to the context.')
parser.add_argument('--back_translation', type=bool, default=False, help='Whether or not to use backtranslation on the training data.')
parser.add_argument('--use_char_emb', type=bool, default=False, help='Use character embedding')
args = parser.parse_args()
if (args.metric_name == 'NLL'):
args.maximize_metric = False
elif (args.metric_name in ('EM', 'F1')):
args.maximize_metric = True
else:
raise ValueError(f'Unrecognized metric name: "{args.metric_name}"')
return args
|
def get_train_args():
parser = argparse.ArgumentParser('Train a model on SQuAD')
add_common_args(parser)
add_train_test_args(parser)
parser.add_argument('--eval_steps', type=int, default=50000, help='Number of steps between successive evaluations.')
parser.add_argument('--lr', type=float, default=0.5, help='Learning rate.')
parser.add_argument('--l2_wd', type=float, default=0, help='L2 weight decay.')
parser.add_argument('--num_epochs', type=int, default=30, help='Number of epochs for which to train. Negative means forever.')
parser.add_argument('--drop_prob', type=float, default=0.2, help='Probability of zeroing an activation in dropout layers.')
parser.add_argument('--metric_name', type=str, default='F1', choices=('NLL', 'EM', 'F1'), help='Name of dev metric to determine best checkpoint.')
parser.add_argument('--max_checkpoints', type=int, default=5, help='Maximum number of checkpoints to keep on disk.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Maximum gradient norm for gradient clipping.')
parser.add_argument('--seed', type=int, default=224, help='Random seed for reproducibility.')
parser.add_argument('--ema_decay', type=float, default=0.999, help='Decay rate for exponential moving average of parameters.')
parser.add_argument('--span_corrupt', type=bool, default=False, help='Whether or not to span corrupt the training data. The corruption occurs to the context.')
parser.add_argument('--back_translation', type=bool, default=False, help='Whether or not to use backtranslation on the training data.')
parser.add_argument('--use_char_emb', type=bool, default=False, help='Use character embedding')
args = parser.parse_args()
if (args.metric_name == 'NLL'):
args.maximize_metric = False
elif (args.metric_name in ('EM', 'F1')):
args.maximize_metric = True
else:
raise ValueError(f'Unrecognized metric name: "{args.metric_name}"')
return args<|docstring|>Get arguments needed in train.py.<|endoftext|>
|
cd524bd7317931f3a4570dc0c07d44aaeb4f40b094677f18be353dc2d605be14
|
def get_test_args():
'Get arguments needed in test.py.'
parser = argparse.ArgumentParser('Test a trained model on SQuAD')
add_common_args(parser)
add_train_test_args(parser)
parser.add_argument('--split', type=str, default='dev', choices=('train', 'dev', 'test'), help='Split to use for testing.')
parser.add_argument('--sub_file', type=str, default='submission.csv', help='Name for submission file.')
args = parser.parse_args()
if (not args.load_path):
raise argparse.ArgumentError('Missing required argument --load_path')
return args
|
Get arguments needed in test.py.
|
args.py
|
get_test_args
|
amelia22974/cs224n-2022-iid-squad
| 0
|
python
|
def get_test_args():
parser = argparse.ArgumentParser('Test a trained model on SQuAD')
add_common_args(parser)
add_train_test_args(parser)
parser.add_argument('--split', type=str, default='dev', choices=('train', 'dev', 'test'), help='Split to use for testing.')
parser.add_argument('--sub_file', type=str, default='submission.csv', help='Name for submission file.')
args = parser.parse_args()
if (not args.load_path):
raise argparse.ArgumentError('Missing required argument --load_path')
return args
|
def get_test_args():
parser = argparse.ArgumentParser('Test a trained model on SQuAD')
add_common_args(parser)
add_train_test_args(parser)
parser.add_argument('--split', type=str, default='dev', choices=('train', 'dev', 'test'), help='Split to use for testing.')
parser.add_argument('--sub_file', type=str, default='submission.csv', help='Name for submission file.')
args = parser.parse_args()
if (not args.load_path):
raise argparse.ArgumentError('Missing required argument --load_path')
return args<|docstring|>Get arguments needed in test.py.<|endoftext|>
|
a3ba29287b22e86fbaf6d7bd698f5f9ca59df04fc96f1f568804d902300d81eb
|
def add_common_args(parser):
'Add arguments common to all 3 scripts: setup.py, train.py, test.py'
parser.add_argument('--train_record_file', type=str, default='./data/train.npz')
parser.add_argument('--dev_record_file', type=str, default='./data/dev.npz')
parser.add_argument('--test_record_file', type=str, default='./data/test.npz')
parser.add_argument('--word_emb_file', type=str, default='./data/word_emb.json')
parser.add_argument('--char_emb_file', type=str, default='./data/char_emb.json')
parser.add_argument('--train_eval_file', type=str, default='./data/train_eval.json')
parser.add_argument('--dev_eval_file', type=str, default='./data/dev_eval.json')
parser.add_argument('--test_eval_file', type=str, default='./data/test_eval.json')
parser.add_argument('--train_span_corrupt_record_file', type=str, default='./data/train_span_corrupt.npz')
parser.add_argument('--train_back_translation_record_file', type=str, default='./data/train_back_translation.npz')
|
Add arguments common to all 3 scripts: setup.py, train.py, test.py
|
args.py
|
add_common_args
|
amelia22974/cs224n-2022-iid-squad
| 0
|
python
|
def add_common_args(parser):
parser.add_argument('--train_record_file', type=str, default='./data/train.npz')
parser.add_argument('--dev_record_file', type=str, default='./data/dev.npz')
parser.add_argument('--test_record_file', type=str, default='./data/test.npz')
parser.add_argument('--word_emb_file', type=str, default='./data/word_emb.json')
parser.add_argument('--char_emb_file', type=str, default='./data/char_emb.json')
parser.add_argument('--train_eval_file', type=str, default='./data/train_eval.json')
parser.add_argument('--dev_eval_file', type=str, default='./data/dev_eval.json')
parser.add_argument('--test_eval_file', type=str, default='./data/test_eval.json')
parser.add_argument('--train_span_corrupt_record_file', type=str, default='./data/train_span_corrupt.npz')
parser.add_argument('--train_back_translation_record_file', type=str, default='./data/train_back_translation.npz')
|
def add_common_args(parser):
parser.add_argument('--train_record_file', type=str, default='./data/train.npz')
parser.add_argument('--dev_record_file', type=str, default='./data/dev.npz')
parser.add_argument('--test_record_file', type=str, default='./data/test.npz')
parser.add_argument('--word_emb_file', type=str, default='./data/word_emb.json')
parser.add_argument('--char_emb_file', type=str, default='./data/char_emb.json')
parser.add_argument('--train_eval_file', type=str, default='./data/train_eval.json')
parser.add_argument('--dev_eval_file', type=str, default='./data/dev_eval.json')
parser.add_argument('--test_eval_file', type=str, default='./data/test_eval.json')
parser.add_argument('--train_span_corrupt_record_file', type=str, default='./data/train_span_corrupt.npz')
parser.add_argument('--train_back_translation_record_file', type=str, default='./data/train_back_translation.npz')<|docstring|>Add arguments common to all 3 scripts: setup.py, train.py, test.py<|endoftext|>
|
57a27f5d62011b6300f92da72ccd32824cd23a1922d057c04eb14fbd36384a07
|
def add_train_test_args(parser):
'Add arguments common to train.py and test.py'
parser.add_argument('--name', '-n', type=str, required=True, help='Name to identify training or test run.')
parser.add_argument('--max_ans_len', type=int, default=15, help='Maximum length of a predicted answer.')
parser.add_argument('--num_workers', type=int, default=4, help='Number of sub-processes to use per data loader.')
parser.add_argument('--save_dir', type=str, default='./save/', help='Base directory for saving information.')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size per GPU. Scales automatically when multiple GPUs are available.')
parser.add_argument('--use_squad_v2', type=(lambda s: s.lower().startswith('t')), default=True, help='Whether to use SQuAD 2.0 (unanswerable) questions.')
parser.add_argument('--hidden_size', type=int, default=100, help='Number of features in encoder hidden layers.')
parser.add_argument('--num_visuals', type=int, default=10, help='Number of examples to visualize in TensorBoard.')
parser.add_argument('--load_path', type=str, default=None, help='Path to load as a model checkpoint.')
parser.add_argument('--self_attention', type=str, default=None, help='Write --self-attention Yes to get self-attention.')
|
Add arguments common to train.py and test.py
|
args.py
|
add_train_test_args
|
amelia22974/cs224n-2022-iid-squad
| 0
|
python
|
def add_train_test_args(parser):
parser.add_argument('--name', '-n', type=str, required=True, help='Name to identify training or test run.')
parser.add_argument('--max_ans_len', type=int, default=15, help='Maximum length of a predicted answer.')
parser.add_argument('--num_workers', type=int, default=4, help='Number of sub-processes to use per data loader.')
parser.add_argument('--save_dir', type=str, default='./save/', help='Base directory for saving information.')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size per GPU. Scales automatically when multiple GPUs are available.')
parser.add_argument('--use_squad_v2', type=(lambda s: s.lower().startswith('t')), default=True, help='Whether to use SQuAD 2.0 (unanswerable) questions.')
parser.add_argument('--hidden_size', type=int, default=100, help='Number of features in encoder hidden layers.')
parser.add_argument('--num_visuals', type=int, default=10, help='Number of examples to visualize in TensorBoard.')
parser.add_argument('--load_path', type=str, default=None, help='Path to load as a model checkpoint.')
parser.add_argument('--self_attention', type=str, default=None, help='Write --self-attention Yes to get self-attention.')
|
def add_train_test_args(parser):
parser.add_argument('--name', '-n', type=str, required=True, help='Name to identify training or test run.')
parser.add_argument('--max_ans_len', type=int, default=15, help='Maximum length of a predicted answer.')
parser.add_argument('--num_workers', type=int, default=4, help='Number of sub-processes to use per data loader.')
parser.add_argument('--save_dir', type=str, default='./save/', help='Base directory for saving information.')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size per GPU. Scales automatically when multiple GPUs are available.')
parser.add_argument('--use_squad_v2', type=(lambda s: s.lower().startswith('t')), default=True, help='Whether to use SQuAD 2.0 (unanswerable) questions.')
parser.add_argument('--hidden_size', type=int, default=100, help='Number of features in encoder hidden layers.')
parser.add_argument('--num_visuals', type=int, default=10, help='Number of examples to visualize in TensorBoard.')
parser.add_argument('--load_path', type=str, default=None, help='Path to load as a model checkpoint.')
parser.add_argument('--self_attention', type=str, default=None, help='Write --self-attention Yes to get self-attention.')<|docstring|>Add arguments common to train.py and test.py<|endoftext|>
|
de8512b09d328d78c5afa7b7f698054b5cc6b7b4802324ac1e1287f0afcc58f6
|
def generate_slug(title, max=255):
'\n Create a slug from the title\n '
slug = slugify(title)
unique = random_string_generator()
slug = slug[:max]
while (len(((slug + '-') + unique)) > max):
parts = slug.split('-')
if (len(parts) is 1):
slug = slug[:((max - len(unique)) - 1)]
else:
slug = '-'.join(parts[:(- 1)])
return ((slug + '-') + unique)
|
Create a slug from the title
|
authors/apps/core/utils.py
|
generate_slug
|
Tittoh/blog-API
| 1
|
python
|
def generate_slug(title, max=255):
'\n \n '
slug = slugify(title)
unique = random_string_generator()
slug = slug[:max]
while (len(((slug + '-') + unique)) > max):
parts = slug.split('-')
if (len(parts) is 1):
slug = slug[:((max - len(unique)) - 1)]
else:
slug = '-'.join(parts[:(- 1)])
return ((slug + '-') + unique)
|
def generate_slug(title, max=255):
'\n \n '
slug = slugify(title)
unique = random_string_generator()
slug = slug[:max]
while (len(((slug + '-') + unique)) > max):
parts = slug.split('-')
if (len(parts) is 1):
slug = slug[:((max - len(unique)) - 1)]
else:
slug = '-'.join(parts[:(- 1)])
return ((slug + '-') + unique)<|docstring|>Create a slug from the title<|endoftext|>
|
5cb5bf62c721072dadda5bbf780f0aabd9d0e7e178fec9a9f5ded28dd1afc059
|
@objc.python_method
def italicize(self, x, y, italicAngle=0.0, pivotalY=0.0):
"\n\t\tReturns the italicized position of an NSPoint 'thisPoint'\n\t\tfor a given angle 'italicAngle' and the pivotal height 'pivotalY',\n\t\taround which the italic slanting is executed, usually half x-height.\n\t\tUsage: myPoint = italicize(myPoint,10,xHeight*0.5)\n\t\t"
yOffset = (y - pivotalY)
italicAngle = math.radians(italicAngle)
tangens = math.tan(italicAngle)
horizontalDeviance = (tangens * yOffset)
x += horizontalDeviance
return x
|
Returns the italicized position of an NSPoint 'thisPoint'
for a given angle 'italicAngle' and the pivotal height 'pivotalY',
around which the italic slanting is executed, usually half x-height.
Usage: myPoint = italicize(myPoint,10,xHeight*0.5)
|
Speedlines.glyphsFilter/Contents/Resources/plugin.py
|
italicize
|
mekkablue/Speedlines
| 0
|
python
|
@objc.python_method
def italicize(self, x, y, italicAngle=0.0, pivotalY=0.0):
"\n\t\tReturns the italicized position of an NSPoint 'thisPoint'\n\t\tfor a given angle 'italicAngle' and the pivotal height 'pivotalY',\n\t\taround which the italic slanting is executed, usually half x-height.\n\t\tUsage: myPoint = italicize(myPoint,10,xHeight*0.5)\n\t\t"
yOffset = (y - pivotalY)
italicAngle = math.radians(italicAngle)
tangens = math.tan(italicAngle)
horizontalDeviance = (tangens * yOffset)
x += horizontalDeviance
return x
|
@objc.python_method
def italicize(self, x, y, italicAngle=0.0, pivotalY=0.0):
"\n\t\tReturns the italicized position of an NSPoint 'thisPoint'\n\t\tfor a given angle 'italicAngle' and the pivotal height 'pivotalY',\n\t\taround which the italic slanting is executed, usually half x-height.\n\t\tUsage: myPoint = italicize(myPoint,10,xHeight*0.5)\n\t\t"
yOffset = (y - pivotalY)
italicAngle = math.radians(italicAngle)
tangens = math.tan(italicAngle)
horizontalDeviance = (tangens * yOffset)
x += horizontalDeviance
return x<|docstring|>Returns the italicized position of an NSPoint 'thisPoint'
for a given angle 'italicAngle' and the pivotal height 'pivotalY',
around which the italic slanting is executed, usually half x-height.
Usage: myPoint = italicize(myPoint,10,xHeight*0.5)<|endoftext|>
|
88775dc2a318c9c26b56dc1ed81092dd4ddbe5a49468f76d583fce675b27c7d3
|
@objc.python_method
def __file__(self):
'Please leave this method unchanged'
return __file__
|
Please leave this method unchanged
|
Speedlines.glyphsFilter/Contents/Resources/plugin.py
|
__file__
|
mekkablue/Speedlines
| 0
|
python
|
@objc.python_method
def __file__(self):
return __file__
|
@objc.python_method
def __file__(self):
return __file__<|docstring|>Please leave this method unchanged<|endoftext|>
|
d5c37db41e56054ddb095fd7b83e3c52537d5b17937accce4996002d3ba51df1
|
def setUp(self):
'Set up test fixtures, if any.'
self.pairs = ('BTC_USD', 'BTC_RUB', 'USD_RUB')
self.limit = 200
self.api = PublicApi()
|
Set up test fixtures, if any.
|
tests/test_public.py
|
setUp
|
victorusachev/ExmoApi
| 1
|
python
|
def setUp(self):
self.pairs = ('BTC_USD', 'BTC_RUB', 'USD_RUB')
self.limit = 200
self.api = PublicApi()
|
def setUp(self):
self.pairs = ('BTC_USD', 'BTC_RUB', 'USD_RUB')
self.limit = 200
self.api = PublicApi()<|docstring|>Set up test fixtures, if any.<|endoftext|>
|
60d3d34bbe069a84e0f0fb75d5c4c65326cfedfbd4588c5e03902405e45b8593
|
def tearDown(self):
'Tear down test fixtures, if any.'
|
Tear down test fixtures, if any.
|
tests/test_public.py
|
tearDown
|
victorusachev/ExmoApi
| 1
|
python
|
def tearDown(self):
|
def tearDown(self):
<|docstring|>Tear down test fixtures, if any.<|endoftext|>
|
c8fe8895f0c1a855dbd8d81ed53e79e9963508df50f9ef91fb140de3eefb9e81
|
def test_query_trades(self):
'Test query `trades`'
trades = self.api.trades(self.pairs)
self.assertIsInstance(trades, dict)
self.assertTrue(set(trades.keys()), set(self.pairs))
|
Test query `trades`
|
tests/test_public.py
|
test_query_trades
|
victorusachev/ExmoApi
| 1
|
python
|
def test_query_trades(self):
trades = self.api.trades(self.pairs)
self.assertIsInstance(trades, dict)
self.assertTrue(set(trades.keys()), set(self.pairs))
|
def test_query_trades(self):
trades = self.api.trades(self.pairs)
self.assertIsInstance(trades, dict)
self.assertTrue(set(trades.keys()), set(self.pairs))<|docstring|>Test query `trades`<|endoftext|>
|
b6e5ed712c5e3c28ebb633c033b2feb894fd21fdb16929c6c85ea1ba6bcfffba
|
def test_query_order_book(self):
'Test query `order_book`'
order_book = self.api.order_book(pairs=self.pairs, limit=self.limit)
self.assertIsInstance(order_book, dict)
self.assertTrue(set(order_book.keys()), set(self.pairs))
self.assertTrue(all(map((lambda el: (len(el.get('ask')) <= self.limit >= len(el.get('bid')))), order_book.values())))
|
Test query `order_book`
|
tests/test_public.py
|
test_query_order_book
|
victorusachev/ExmoApi
| 1
|
python
|
def test_query_order_book(self):
order_book = self.api.order_book(pairs=self.pairs, limit=self.limit)
self.assertIsInstance(order_book, dict)
self.assertTrue(set(order_book.keys()), set(self.pairs))
self.assertTrue(all(map((lambda el: (len(el.get('ask')) <= self.limit >= len(el.get('bid')))), order_book.values())))
|
def test_query_order_book(self):
order_book = self.api.order_book(pairs=self.pairs, limit=self.limit)
self.assertIsInstance(order_book, dict)
self.assertTrue(set(order_book.keys()), set(self.pairs))
self.assertTrue(all(map((lambda el: (len(el.get('ask')) <= self.limit >= len(el.get('bid')))), order_book.values())))<|docstring|>Test query `order_book`<|endoftext|>
|
9a13f72fc8dbe0acd72eaa3b8915404df02aeebfb51a2933cb8bbeb34698dce8
|
def test_query_ticker(self):
'Test query `ticker`'
ticker = self.api.ticker()
self.assertIsInstance(ticker, dict)
self.assertTrue(set(self.pairs).issubset(set(ticker.keys())))
|
Test query `ticker`
|
tests/test_public.py
|
test_query_ticker
|
victorusachev/ExmoApi
| 1
|
python
|
def test_query_ticker(self):
ticker = self.api.ticker()
self.assertIsInstance(ticker, dict)
self.assertTrue(set(self.pairs).issubset(set(ticker.keys())))
|
def test_query_ticker(self):
ticker = self.api.ticker()
self.assertIsInstance(ticker, dict)
self.assertTrue(set(self.pairs).issubset(set(ticker.keys())))<|docstring|>Test query `ticker`<|endoftext|>
|
663158a3227f532546fb2d5aa344ce2276159a22ceb5544b3e70d9a11cb704d0
|
def test_query_pair_settings(self):
'Test query `pair_settings`'
pair_settings = self.api.pair_settings()
self.assertIsInstance(pair_settings, dict)
self.assertTrue(set(self.pairs).issubset(set(pair_settings.keys())))
|
Test query `pair_settings`
|
tests/test_public.py
|
test_query_pair_settings
|
victorusachev/ExmoApi
| 1
|
python
|
def test_query_pair_settings(self):
pair_settings = self.api.pair_settings()
self.assertIsInstance(pair_settings, dict)
self.assertTrue(set(self.pairs).issubset(set(pair_settings.keys())))
|
def test_query_pair_settings(self):
pair_settings = self.api.pair_settings()
self.assertIsInstance(pair_settings, dict)
self.assertTrue(set(self.pairs).issubset(set(pair_settings.keys())))<|docstring|>Test query `pair_settings`<|endoftext|>
|
4a6a64cd18a5604a643eb68136176318994118dc66ee78e110279c3b014d494b
|
def test_query_currency(self):
'Test query `currency`'
all_currencies = self.api.currency()
self.assertIsInstance(all_currencies, list)
self.assertTrue((len(all_currencies) > 0))
currencies = {currency for pair in self.pairs for currency in pair.split('_')}
self.assertTrue(currencies.issubset(all_currencies))
|
Test query `currency`
|
tests/test_public.py
|
test_query_currency
|
victorusachev/ExmoApi
| 1
|
python
|
def test_query_currency(self):
all_currencies = self.api.currency()
self.assertIsInstance(all_currencies, list)
self.assertTrue((len(all_currencies) > 0))
currencies = {currency for pair in self.pairs for currency in pair.split('_')}
self.assertTrue(currencies.issubset(all_currencies))
|
def test_query_currency(self):
all_currencies = self.api.currency()
self.assertIsInstance(all_currencies, list)
self.assertTrue((len(all_currencies) > 0))
currencies = {currency for pair in self.pairs for currency in pair.split('_')}
self.assertTrue(currencies.issubset(all_currencies))<|docstring|>Test query `currency`<|endoftext|>
|
d1f863a47f0d5285d0c17ef0583064959ebe8f37be07b5c3fa4d47c2ac4c0e7b
|
def hash(obj, hasher=None, hash_name='md5', coerce_mmap=True):
" Quick calculation of a hash to identify uniquely Python objects\n containing numpy arrays. The difference with this hash and joblib\n is that it tries to hash different mutable objects with the same\n values to the same hash.\n\n\n Parameters\n -----------\n hash_name: 'md5' or 'sha1'\n Hashing algorithm used. sha1 is supposedly safer, but md5 is\n faster.\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n "
if (hasher is None):
if ('numpy' in sys.modules):
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj)
|
Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays. The difference with this hash and joblib
is that it tries to hash different mutable objects with the same
values to the same hash.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
|
provenance/hashing.py
|
hash
|
nitramsivart/provenance
| 30
|
python
|
def hash(obj, hasher=None, hash_name='md5', coerce_mmap=True):
" Quick calculation of a hash to identify uniquely Python objects\n containing numpy arrays. The difference with this hash and joblib\n is that it tries to hash different mutable objects with the same\n values to the same hash.\n\n\n Parameters\n -----------\n hash_name: 'md5' or 'sha1'\n Hashing algorithm used. sha1 is supposedly safer, but md5 is\n faster.\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n "
if (hasher is None):
if ('numpy' in sys.modules):
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj)
|
def hash(obj, hasher=None, hash_name='md5', coerce_mmap=True):
" Quick calculation of a hash to identify uniquely Python objects\n containing numpy arrays. The difference with this hash and joblib\n is that it tries to hash different mutable objects with the same\n values to the same hash.\n\n\n Parameters\n -----------\n hash_name: 'md5' or 'sha1'\n Hashing algorithm used. sha1 is supposedly safer, but md5 is\n faster.\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n "
if (hasher is None):
if ('numpy' in sys.modules):
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj)<|docstring|>Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays. The difference with this hash and joblib
is that it tries to hash different mutable objects with the same
values to the same hash.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray<|endoftext|>
|
99bfe9c51aca5681ed148ca280945b04a043af7ed2c1f19985dbf8b03bef3aba
|
def file_hash(filename, hash_name='md5'):
'Streams the bytes of the given file through either md5 or sha1\n and returns the hexdigest.\n '
if (hash_name not in set(['md5', 'sha1'])):
raise ValueError('hashname must be "md5" or "sha1"')
hasher = (hashlib.md5() if (hash_name == 'md5') else hashlib.sha1())
with open(filename, 'rb') as f:
for chunk in iter((lambda : f.read(4096)), b''):
hasher.update(chunk)
return hasher.hexdigest()
|
Streams the bytes of the given file through either md5 or sha1
and returns the hexdigest.
|
provenance/hashing.py
|
file_hash
|
nitramsivart/provenance
| 30
|
python
|
def file_hash(filename, hash_name='md5'):
'Streams the bytes of the given file through either md5 or sha1\n and returns the hexdigest.\n '
if (hash_name not in set(['md5', 'sha1'])):
raise ValueError('hashname must be "md5" or "sha1"')
hasher = (hashlib.md5() if (hash_name == 'md5') else hashlib.sha1())
with open(filename, 'rb') as f:
for chunk in iter((lambda : f.read(4096)), b):
hasher.update(chunk)
return hasher.hexdigest()
|
def file_hash(filename, hash_name='md5'):
'Streams the bytes of the given file through either md5 or sha1\n and returns the hexdigest.\n '
if (hash_name not in set(['md5', 'sha1'])):
raise ValueError('hashname must be "md5" or "sha1"')
hasher = (hashlib.md5() if (hash_name == 'md5') else hashlib.sha1())
with open(filename, 'rb') as f:
for chunk in iter((lambda : f.read(4096)), b):
hasher.update(chunk)
return hasher.hexdigest()<|docstring|>Streams the bytes of the given file through either md5 or sha1
and returns the hexdigest.<|endoftext|>
|
a228fe4a733633821e86e10617c6a9dbab15a6ff6035a29c7bec56462d3d5bbf
|
def __init__(self, hash_name='md5', coerce_mmap=True):
'\n Parameters\n ----------\n hash_name: string\n The hash algorithm to be used\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n objects.\n '
self.coerce_mmap = coerce_mmap
self.chunk_size = ((200 * 1024) * 1024)
Hasher.__init__(self, hash_name=hash_name)
import numpy as np
self.np = np
|
Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.
|
provenance/hashing.py
|
__init__
|
nitramsivart/provenance
| 30
|
python
|
def __init__(self, hash_name='md5', coerce_mmap=True):
'\n Parameters\n ----------\n hash_name: string\n The hash algorithm to be used\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n objects.\n '
self.coerce_mmap = coerce_mmap
self.chunk_size = ((200 * 1024) * 1024)
Hasher.__init__(self, hash_name=hash_name)
import numpy as np
self.np = np
|
def __init__(self, hash_name='md5', coerce_mmap=True):
'\n Parameters\n ----------\n hash_name: string\n The hash algorithm to be used\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n objects.\n '
self.coerce_mmap = coerce_mmap
self.chunk_size = ((200 * 1024) * 1024)
Hasher.__init__(self, hash_name=hash_name)
import numpy as np
self.np = np<|docstring|>Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.<|endoftext|>
|
f1e8a5d0ff2f8aa20768178548e1a85e261ac174d69316b8af29d49e46c9990b
|
def save(self, obj):
' Subclass the save method, to hash ndarray subclass, rather\n than pickling them. Off course, this is a total abuse of\n the Pickler class.\n '
if (isinstance(obj, self.np.ndarray) and (not obj.dtype.hasobject)):
obj_bytes = (obj.dtype.itemsize * obj.size)
if (obj_bytes > self.chunk_size):
try:
copy = obj[:]
copy.shape = (copy.size,)
except AttributeError as e:
if (e.args[0] != 'incompatible shape for a non-contiguous array'):
raise e
copy = obj.reshape((obj.size,))
i = 0
size = copy.size
typed_chunk_size = (self.chunk_size // copy.dtype.itemsize)
while (i < size):
end = min((i + typed_chunk_size), size)
self.hash_array(copy[i:end])
i = end
else:
self.hash_array(obj)
if (self.coerce_mmap and isinstance(obj, self.np.memmap)):
klass = self.np.ndarray
else:
klass = obj.__class__
obj = (klass, ('HASHED', obj.dtype, obj.shape))
elif isinstance(obj, self.np.dtype):
klass = obj.__class__
obj = (klass, ('HASHED', obj.descr))
Hasher.save(self, obj)
|
Subclass the save method, to hash ndarray subclass, rather
than pickling them. Off course, this is a total abuse of
the Pickler class.
|
provenance/hashing.py
|
save
|
nitramsivart/provenance
| 30
|
python
|
def save(self, obj):
' Subclass the save method, to hash ndarray subclass, rather\n than pickling them. Off course, this is a total abuse of\n the Pickler class.\n '
if (isinstance(obj, self.np.ndarray) and (not obj.dtype.hasobject)):
obj_bytes = (obj.dtype.itemsize * obj.size)
if (obj_bytes > self.chunk_size):
try:
copy = obj[:]
copy.shape = (copy.size,)
except AttributeError as e:
if (e.args[0] != 'incompatible shape for a non-contiguous array'):
raise e
copy = obj.reshape((obj.size,))
i = 0
size = copy.size
typed_chunk_size = (self.chunk_size // copy.dtype.itemsize)
while (i < size):
end = min((i + typed_chunk_size), size)
self.hash_array(copy[i:end])
i = end
else:
self.hash_array(obj)
if (self.coerce_mmap and isinstance(obj, self.np.memmap)):
klass = self.np.ndarray
else:
klass = obj.__class__
obj = (klass, ('HASHED', obj.dtype, obj.shape))
elif isinstance(obj, self.np.dtype):
klass = obj.__class__
obj = (klass, ('HASHED', obj.descr))
Hasher.save(self, obj)
|
def save(self, obj):
' Subclass the save method, to hash ndarray subclass, rather\n than pickling them. Off course, this is a total abuse of\n the Pickler class.\n '
if (isinstance(obj, self.np.ndarray) and (not obj.dtype.hasobject)):
obj_bytes = (obj.dtype.itemsize * obj.size)
if (obj_bytes > self.chunk_size):
try:
copy = obj[:]
copy.shape = (copy.size,)
except AttributeError as e:
if (e.args[0] != 'incompatible shape for a non-contiguous array'):
raise e
copy = obj.reshape((obj.size,))
i = 0
size = copy.size
typed_chunk_size = (self.chunk_size // copy.dtype.itemsize)
while (i < size):
end = min((i + typed_chunk_size), size)
self.hash_array(copy[i:end])
i = end
else:
self.hash_array(obj)
if (self.coerce_mmap and isinstance(obj, self.np.memmap)):
klass = self.np.ndarray
else:
klass = obj.__class__
obj = (klass, ('HASHED', obj.dtype, obj.shape))
elif isinstance(obj, self.np.dtype):
klass = obj.__class__
obj = (klass, ('HASHED', obj.descr))
Hasher.save(self, obj)<|docstring|>Subclass the save method, to hash ndarray subclass, rather
than pickling them. Off course, this is a total abuse of
the Pickler class.<|endoftext|>
|
9c96256f7dec6213b642bdeeeaa9bbb6148621b5fb08de5701e36be97f6b2bf4
|
def __init__(self, in_channels_list, middle_channels, out_channels, conv_block):
'\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n will be fed\n out_channels (int): number of channels of the FPN representation\n top_blocks (nn.Module or None): if provided, an extra operation will\n be performed on the output of the last (smallest resolution)\n FPN output, and the result will extend the result list\n '
super(RGCN, self).__init__()
self.inner_blocks = []
self.layer_blocks = []
self.gcn_blocks = []
self.res_blocks = []
self.res2_blocks = []
self.sem_final_block = conv_with_kaiming_uniform()(middle_channels, out_channels, 1)
for (idx, in_channels) in enumerate(in_channels_list, 1):
inner_block = 'fpn_inner{}'.format(idx)
layer_block = 'fpn_layer{}'.format(idx)
gcn_block = 'gcn_layer{}'.format(idx)
res_block = 'res_layer{}'.format(idx)
res2_block = 'res2_layer{}'.format(idx)
gcn_block_module = GCNBlock(in_channels, middle_channels)
res_block_module = RESBlock(middle_channels, middle_channels, conv_block)
res2_block_module = RESBlock(middle_channels, middle_channels, conv_block)
inner_block_module = nn.Conv2d(in_channels, out_channels, 1)
layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1)
self.add_module(inner_block, inner_block_module)
self.add_module(layer_block, layer_block_module)
self.add_module(gcn_block, gcn_block_module)
self.add_module(res_block, res_block_module)
self.add_module(res2_block, res2_block_module)
self.inner_blocks.append(inner_block)
self.layer_blocks.append(layer_block)
self.gcn_blocks.append(gcn_block)
self.res_blocks.append(res_block)
self.res2_blocks.append(res2_block)
|
Arguments:
in_channels_list (list[int]): number of channels for each feature map that
will be fed
out_channels (int): number of channels of the FPN representation
top_blocks (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list
|
maskrcnn_benchmark/modeling/sem_branch/mcgcn.py
|
__init__
|
dliu5812/PFFNet
| 8
|
python
|
def __init__(self, in_channels_list, middle_channels, out_channels, conv_block):
    """Build the per-level blocks of the RGCN head.

    Arguments:
        in_channels_list (list[int]): number of channels of each input
            feature map, one entry per pyramid level.
        middle_channels (int): channel width of the GCN and residual blocks.
        out_channels (int): number of channels of the final representation.
        conv_block: factory callable handed to RESBlock to build its convs.
    """
    super(RGCN, self).__init__()
    # Lists hold registered module *names*; the modules themselves are
    # attached via add_module and retrieved later with getattr.
    self.inner_blocks = []
    self.layer_blocks = []
    self.gcn_blocks = []
    self.res_blocks = []
    self.res2_blocks = []
    # Final 1x1 projection from the working width to the output width.
    self.sem_final_block = conv_with_kaiming_uniform()(middle_channels, out_channels, 1)
    for level, level_channels in enumerate(in_channels_list, 1):
        # Construct in the same order as the original implementation so any
        # RNG-dependent weight initialisation is reproduced exactly.
        gcn_module = GCNBlock(level_channels, middle_channels)
        res_module = RESBlock(middle_channels, middle_channels, conv_block)
        res2_module = RESBlock(middle_channels, middle_channels, conv_block)
        inner_module = nn.Conv2d(level_channels, out_channels, 1)
        layer_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1)
        for prefix, module, registry in (
            ('fpn_inner', inner_module, self.inner_blocks),
            ('fpn_layer', layer_module, self.layer_blocks),
            ('gcn_layer', gcn_module, self.gcn_blocks),
            ('res_layer', res_module, self.res_blocks),
            ('res2_layer', res2_module, self.res2_blocks),
        ):
            block_name = '{}{}'.format(prefix, level)
            self.add_module(block_name, module)
            registry.append(block_name)
|
def __init__(self, in_channels_list, middle_channels, out_channels, conv_block):
    """Build the per-level GCN / residual / FPN conv blocks of the RGCN head.

    Arguments:
        in_channels_list (list[int]): number of channels of each input
            feature map, one entry per pyramid level.
        middle_channels (int): channel width of the GCN and residual blocks.
        out_channels (int): number of channels of the final representation.
        conv_block: factory callable handed to RESBlock to build its convs.
    """
    super(RGCN, self).__init__()
    # These lists store registered module *names*; the modules themselves
    # are attached with add_module and later fetched via getattr.
    self.inner_blocks = []
    self.layer_blocks = []
    self.gcn_blocks = []
    self.res_blocks = []
    self.res2_blocks = []
    # Final 1x1 projection from the working width to the output width.
    self.sem_final_block = conv_with_kaiming_uniform()(middle_channels, out_channels, 1)
    for (idx, in_channels) in enumerate(in_channels_list, 1):  # 1-based level index
        inner_block = 'fpn_inner{}'.format(idx)
        layer_block = 'fpn_layer{}'.format(idx)
        gcn_block = 'gcn_layer{}'.format(idx)
        res_block = 'res_layer{}'.format(idx)
        res2_block = 'res2_layer{}'.format(idx)
        gcn_block_module = GCNBlock(in_channels, middle_channels)
        res_block_module = RESBlock(middle_channels, middle_channels, conv_block)
        res2_block_module = RESBlock(middle_channels, middle_channels, conv_block)
        # NOTE(review): the fpn_inner*/fpn_layer* convs are registered but never
        # used by forward() in this file — possibly kept for checkpoint
        # compatibility; confirm before removing.
        inner_block_module = nn.Conv2d(in_channels, out_channels, 1)
        layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1)
        self.add_module(inner_block, inner_block_module)
        self.add_module(layer_block, layer_block_module)
        self.add_module(gcn_block, gcn_block_module)
        self.add_module(res_block, res_block_module)
        self.add_module(res2_block, res2_block_module)
        self.inner_blocks.append(inner_block)
        self.layer_blocks.append(layer_block)
        self.gcn_blocks.append(gcn_block)
        self.res_blocks.append(res_block)
        self.res2_blocks.append(res2_block)<|docstring|>Arguments:
in_channels_list (list[int]): number of channels for each feature map that
will be fed
out_channels (int): number of channels of the FPN representation
top_blocks (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list<|endoftext|>
|
f57a72c6a83cbcf7bfe8820c80d3d174d56b5c04877575e876b5761fb8c10bfc
|
def forward(self, x):
    """Fuse multi-level features top-down into a single semantic map.

    Arguments:
        x (list[Tensor]): feature maps per level, highest resolution first
            (x[-1] is the coarsest level).
    Returns:
        Tensor: the semantic map produced from the finest fused level,
        projected by ``sem_final_block`` and upsampled 4x (nearest).
    """
    # Start at the coarsest level: GCN projection + residual refinement.
    last_inner = getattr(self, self.res2_blocks[-1])(
        getattr(self, self.gcn_blocks[-1])(x[-1]))
    idx = 1
    # Walk the remaining levels toward finer resolution, upsampling the
    # running feature map by 2 and merging the lateral branch each step.
    for feature in x[-2::-1]:
        inner_top_down = F.interpolate(last_inner, scale_factor=2, mode='nearest')
        idx += 1
        lateral = getattr(self, self.res2_blocks[-idx])(
            getattr(self, self.gcn_blocks[-idx])(feature))
        # NOTE(review): res_blocks is indexed with an offset of one relative
        # to gcn/res2 blocks — preserved from the original; verify intended.
        last_inner = getattr(self, self.res_blocks[-idx + 1])(lateral + inner_top_down)
    # 1x1 projection to output channels, then upsample to input resolution.
    sem_map = F.interpolate(self.sem_final_block(last_inner),
                            scale_factor=4, mode='nearest')
    return sem_map
|
Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
    sem_map (Tensor): a single full-resolution semantic map; intermediate
        per-level results are computed, but only the final map is returned.
|
maskrcnn_benchmark/modeling/sem_branch/mcgcn.py
|
forward
|
dliu5812/PFFNet
| 8
|
python
|
def forward(self, x):
    """Top-down fusion of multi-level features into one semantic map.

    Arguments:
        x (list[Tensor]): per-level feature maps, finest first
            (x[-1] is the coarsest level).
    Returns:
        Tensor: the semantic map from the finest fused level, projected by
        ``sem_final_block`` and upsampled 4x with nearest interpolation.
    """
    # Seed the top-down pathway from the coarsest level.
    coarse = getattr(self, self.gcn_blocks[-1])(x[-1])
    merged = getattr(self, self.res2_blocks[-1])(coarse)
    outputs = [merged]
    step = 1
    # Move toward finer levels, doubling resolution at each step.
    for feat in x[-2::-1]:
        upsampled = F.interpolate(merged, scale_factor=2, mode='nearest')
        step += 1
        lateral = getattr(self, self.res2_blocks[-step])(
            getattr(self, self.gcn_blocks[-step])(feat))
        merged = getattr(self, self.res_blocks[1 - step])(lateral + upsampled)
        outputs.insert(0, merged)
    sem_small = self.sem_final_block(outputs[0])
    return F.interpolate(sem_small, scale_factor=4, mode='nearest')
|
def forward(self, x):
    """Fuse multi-level features top-down and return a semantic map.

    Arguments:
        x (list[Tensor]): feature maps per level, highest resolution first
            (x[-1] is the coarsest level).
    Returns:
        Tensor: the semantic map built from the finest fused level
        (the intermediate ``results`` list is computed but discarded).
    """
    # Coarsest level: GCN projection followed by residual refinement.
    last_inner1 = getattr(self, self.gcn_blocks[(- 1)])(x[(- 1)])
    last_inner = getattr(self, self.res2_blocks[(- 1)])(last_inner1)
    results = []
    results.append(last_inner)
    idx = 1
    # Walk the remaining levels toward finer resolution; inner_block and
    # layer_block are zipped along only for loop length and are never used.
    for (feature, inner_block, layer_block) in zip(x[:(- 1)][::(- 1)], self.inner_blocks[:(- 1)][::(- 1)], self.layer_blocks[:(- 1)][::(- 1)]):
        inner_top_down = F.interpolate(last_inner, scale_factor=2, mode='nearest')
        idx = (idx + 1)
        # Lateral branch: GCN projection + residual refinement of this level.
        inner_lateral = getattr(self, self.gcn_blocks[(- idx)])(feature)
        inner_lateral2 = getattr(self, self.res2_blocks[(- idx)])(inner_lateral)
        last_inner_pre = (inner_lateral2 + inner_top_down)
        # NOTE(review): res_blocks is indexed with a one-off offset relative
        # to gcn/res2 blocks — verify this asymmetry is intended.
        last_inner = getattr(self, self.res_blocks[((- idx) + 1)])(last_inner_pre)
        results.insert(0, last_inner)
    # Project the finest fused map to output channels, upsample 4x.
    out_25 = results[0]
    sem_map_25 = self.sem_final_block(out_25)
    sem_map = F.interpolate(sem_map_25, scale_factor=4, mode='nearest')
    results.insert(0, sem_map)
    return sem_map<|docstring|>Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
results (tuple[Tensor]): feature maps after FPN layers.
They are ordered from highest resolution first.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.